diff --git a/python/plugins/processing/otb/OTBAlgorithm.py b/python/plugins/processing/otb/OTBAlgorithm.py
index 40afaabc8545..45b8fb70ecf2 100644
--- a/python/plugins/processing/otb/OTBAlgorithm.py
+++ b/python/plugins/processing/otb/OTBAlgorithm.py
@@ -6,7 +6,13 @@
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
+ (C) 2013 by CS Systemes d'information (CS SI)
Email : volayaf at gmail dot com
+ otb at c-s dot fr (CS SI)
+ Contributors : Victor Olaya
+ Julien Malik (CS SI) - Changing the way to load algorithms first version
+ Oscar Picas (CS SI) - Changing the way to load algorithms
+ Alexia Mondot (CS SI) - Add hdf5 support
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
@@ -20,37 +26,33 @@
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
-
# This will get replaced with a git SHA1 when you do a git archive
-
__revision__ = '$Format:%H$'
import os
-from qgis.core import *
-from PyQt4.QtCore import *
-from PyQt4.QtGui import *
-
+import re
+import PyQt4.QtGui
from processing.core.GeoAlgorithm import GeoAlgorithm
-from processing.core.GeoAlgorithmExecutionException import \
- GeoAlgorithmExecutionException
-from processing.core.WrongHelpFileException import WrongHelpFileException
-from processing.core.ProcessingLog import ProcessingLog
from processing.parameters.ParameterMultipleInput import ParameterMultipleInput
from processing.parameters.ParameterRaster import ParameterRaster
from processing.parameters.ParameterVector import ParameterVector
from processing.parameters.ParameterBoolean import ParameterBoolean
from processing.parameters.ParameterSelection import ParameterSelection
-from processing.parameters.ParameterExtent import ParameterExtent
+from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
+from processing.core.ProcessingLog import ProcessingLog
+from processing.core.ProcessingUtils import ProcessingUtils
+from processing.core.WrongHelpFileException import WrongHelpFileException
from processing.parameters.ParameterFactory import ParameterFactory
from processing.outputs.OutputFactory import OutputFactory
-from processing.tools.system import *
-
from processing.otb.OTBUtils import OTBUtils
-
+from processing.parameters.ParameterExtent import ParameterExtent
+import xml.etree.ElementTree as ET
+import traceback
+import inspect
class OTBAlgorithm(GeoAlgorithm):
- REGION_OF_INTEREST = 'ROI'
+ REGION_OF_INTEREST = "ROI"
def __init__(self, descriptionfile):
GeoAlgorithm.__init__(self)
@@ -58,7 +60,11 @@ def __init__(self, descriptionfile):
self.descriptionFile = descriptionfile
self.defineCharacteristicsFromFile()
self.numExportedLayers = 0
- self.hasROI = None
+ self.hasROI = None;
+
+
+ def __str__(self):
+ return( "Algo : " + self.name + " from app : " + self.cliName + " in : " + self.group )
def getCopy(self):
newone = OTBAlgorithm(self.descriptionFile)
@@ -66,74 +72,109 @@ def getCopy(self):
return newone
def getIcon(self):
- return QIcon(os.path.dirname(__file__) + '/../images/otb.png')
+ return PyQt4.QtGui.QIcon(os.path.dirname(__file__) + "/../images/otb.png")
def helpFile(self):
- folder = os.path.join(OTBUtils.otbDescriptionPath(), 'doc')
- helpfile = os.path.join(str(folder), self.appkey + '.html')
+ folder = os.path.join( OTBUtils.otbDescriptionPath(), 'doc' )
+ helpfile = os.path.join( str(folder), self.appkey + ".html")
if os.path.exists(helpfile):
return helpfile
else:
- raise WrongHelpFileException(
- 'Could not find help file for this algorithm. If you \
- have it put it in: ' + str(folder))
+ raise WrongHelpFileException("Could not find help file for this algorithm. \nIf you have it put it in: \n"+str(folder))
+
+
+ def adapt_list_to_string(self, c_list):
+ a_list = c_list[1:]
+ if a_list[0] in ["ParameterVector", "ParameterMultipleInput"]:
+ if c_list[0] == "ParameterType_InputImageList":
+ a_list[3] = 3
+ else:
+ a_list[3] = -1
+
+ a_list[1] = "-%s" % a_list[1]
+ def mystr(par):
+ if type(par) == type([]):
+ return ";".join(par)
+ return str(par)
+ b_list = map(mystr, a_list)
+ res = "|".join(b_list)
+ return res
+
+ def get_list_from_node(self, myet):
+ all_params = []
+ for parameter in myet.iter('parameter'):
+ rebuild = []
+ par_type = parameter.find('parameter_type').text
+ key = parameter.find('key').text
+ name = parameter.find('name').text
+ source_par_type = parameter.find('parameter_type').attrib['source_parameter_type']
+ rebuild.append(source_par_type)
+ rebuild.append(par_type)
+ rebuild.append(key)
+ rebuild.append(name)
+ for each in parameter[4:]:
+ if not each.tag in ["hidden"]:
+ if len(each.getchildren()) == 0:
+ rebuild.append(each.text)
+ else:
+ rebuild.append([item.text for item in each.iter('choice')])
+ all_params.append(rebuild)
+ return all_params
+
def defineCharacteristicsFromFile(self):
- lines = open(self.descriptionFile)
- line = lines.readline().strip('\n').strip()
- self.appkey = line
- line = lines.readline().strip('\n').strip()
- self.cliName = line
- line = lines.readline().strip('\n').strip()
- self.name = line
- line = lines.readline().strip('\n').strip()
- self.group = line
- while line != '':
+ content = open(self.descriptionFile).read()
+ dom_model = ET.fromstring(content)
+
+ self.appkey = dom_model.find('key').text
+ self.cliName = dom_model.find('exec').text
+ self.name = dom_model.find('longname').text
+ self.group = dom_model.find('group').text
+
+ ProcessingLog.addToLog(ProcessingLog.LOG_ERROR, "Reading parameters... for %s" % self.appkey)
+
+ rebu = None
+ the_result = None
+
+ try:
+ rebu = self.get_list_from_node(dom_model)
+ the_result = map(self.adapt_list_to_string,rebu)
+ except Exception, e:
+ ProcessingLog.addToLog(ProcessingLog.LOG_ERROR, "Could not open OTB algorithm: " + self.descriptionFile + "\n" + traceback.format_exc())
+ raise e
+
+ for line in the_result:
try:
- line = line.strip('\n').strip()
- if line.startswith('Parameter'):
+ if line.startswith("Parameter"):
param = ParameterFactory.getFromString(line)
- # Hack for initializing the elevation parameters
- # from Processing configuration
- if param.name == '-elev.dem.path' or param.name \
- == '-elev.dem':
+ # Hack for initializing the elevation parameters from Processing configuration
+ if param.name == "-elev.dem.path" or param.name == "-elev.dem" or "elev.dem" in param.name:
param.default = OTBUtils.otbSRTMPath()
- elif param.name == '-elev.dem.geoid' or param.name \
- == '-elev.geoid':
+ elif param.name == "-elev.dem.geoid" or param.name == "-elev.geoid" or "elev.geoid" in param.name:
param.default = OTBUtils.otbGeoidPath()
self.addParameter(param)
- elif line.startswith('*Parameter'):
+ elif line.startswith("*Parameter"):
param = ParameterFactory.getFromString(line[1:])
param.isAdvanced = True
self.addParameter(param)
- elif line.startswith('Extent'):
- self.addParameter(ParameterExtent(self.REGION_OF_INTEREST,
- 'Region of interest', '0,1,0,1'))
+ elif line.startswith("Extent"):
+ self.addParameter(ParameterExtent(self.REGION_OF_INTEREST, "Region of interest", "0,1,0,1"))
self.hasROI = True
else:
self.addOutput(OutputFactory.getFromString(line))
- line = lines.readline().strip('\n').strip()
- except Exception, e:
- ProcessingLog.addToLog(ProcessingLog.LOG_ERROR,
- 'Could not open OTB algorithm: '
- + self.descriptionFile + '\n' + line)
+ except Exception,e:
+ ProcessingLog.addToLog(ProcessingLog.LOG_ERROR, "Could not open OTB algorithm: " + self.descriptionFile + "\n" + line)
raise e
- lines.close()
- def checkBeforeOpeningParametersDialog(self):
- path = OTBUtils.otbPath()
- libpath = OTBUtils.otbLibPath()
- if path == '' or libpath == '':
- return 'OTB folder is not configured.\nPlease configure it \
- before running OTB algorithms.'
+
def processAlgorithm(self, progress):
+ currentOs = os.name
+
path = OTBUtils.otbPath()
libpath = OTBUtils.otbLibPath()
- if path == '' or libpath == '':
- raise GeoAlgorithmExecutionException(
- 'OTB folder is not configured.\nPlease configure it \
- before running OTB algorithms.')
+ if path == "" or libpath == "":
+ raise GeoAlgorithmExecutionException("OTB folder is not configured.\nPlease configure it before running OTB algorithms.")
commands = []
commands.append(path + os.sep + self.cliName)
@@ -141,28 +182,91 @@ def processAlgorithm(self, progress):
self.roiVectors = {}
self.roiRasters = {}
for param in self.parameters:
- if param.value is None or param.value == '':
+ # get the given input(s)
+ if param.name in ["-il", "-in"] :
+ newparams = ""
+ listeParameters = param.value.split(";")
+ for inputParameter in listeParameters :
+ # if HDF5 file
+ if "HDF5" in inputParameter :
+ if currentOs == "posix" :
+ data = inputParameter[6:]
+ else :
+ data = inputParameter[5:]
+ dataset = data
+
+ #on Windows the value is not wrapped in double quotes
+ #if data[-1] == '"':
+ if currentOs == "posix" :
+ data = data[:data.index('"')]
+ else :
+ data = data[:data.index('://')]
+ #try :
+ if currentOs == "posix" :
+ dataset.index('"')
+ dataset = os.path.basename( data ) + dataset[dataset.index('"'):]
+ #except ValueError :
+ else :
+ #dataset = os.path.basename( data ) + '"' + dataset[dataset.index('://'):]
+ dataset = dataset[dataset.index('://'):]
+
+ #get index of the subdataset with gdal
+ if currentOs == "posix" :
+ commandgdal = "gdalinfo " + data + " | grep '" + dataset + "$'"
+ else :
+ commandgdal = "gdalinfo " + data + " | findstr \"" + dataset + "$\""
+ resultGDAL = os.popen( commandgdal ).readlines()
+ indexSubdataset = -1
+ if resultGDAL :
+ indexSubdatasetString = re.search("SUBDATASET_(\d+)_", resultGDAL[0])
+ if indexSubdatasetString :
+ #match between ()
+ indexSubdataset = indexSubdatasetString.group(1)
+ else :
+ indexSubdataset = -1
+ else :
+ print "Error : no match of ", dataset, "$ in gdalinfo " + data
+ indexSubdataset = -1
+
+
+ if not indexSubdataset == -1 :
+ indexSubdataset = int(indexSubdataset) -1
+ newParam = "\'" + data + "?&sdataidx=" + str(indexSubdataset) + "\'"
+
+ else :
+ newParam = inputParameter
+
+ newparams += newParam
+ # no hdf5
+ else :
+ newparams += inputParameter
+ newparams += ";"
+ if newparams[-1] == ";":
+ newparams = newparams[:-1]
+ param.value = newparams
+
+ if param.value == None or param.value == "":
continue
if isinstance(param, ParameterVector):
commands.append(param.name)
if self.hasROI:
- roiFile = getTempFilename('shp')
+ roiFile = ProcessingUtils.getTempFilename('shp')
commands.append(roiFile)
self.roiVectors[param.value] = roiFile
else:
- commands.append('"' + param.value + '"')
+ commands.append("\"" + param.value+ "\"")
elif isinstance(param, ParameterRaster):
commands.append(param.name)
if self.hasROI:
- roiFile = getTempFilename('tif')
+ roiFile = ProcessingUtils.getTempFilename('tif')
commands.append(roiFile)
self.roiRasters[param.value] = roiFile
else:
- commands.append('"' + param.value + '"')
+ commands.append("\"" + param.value+ "\"")
elif isinstance(param, ParameterMultipleInput):
commands.append(param.name)
- files = str(param.value).split(';')
- paramvalue = ' '.join(['"' + f + '"' for f in files])
+ files = str(param.value).split(";")
+ paramvalue = " ".join(["\"" + f + "\"" for f in files])
commands.append(paramvalue)
elif isinstance(param, ParameterSelection):
commands.append(param.name)
@@ -173,7 +277,7 @@ def processAlgorithm(self, progress):
commands.append(param.name)
commands.append(str(param.value).lower())
elif isinstance(param, ParameterExtent):
- self.roiValues = param.value.split(',')
+ self.roiValues = param.value.split(",")
else:
commands.append(param.name)
commands.append(str(param.value))
@@ -181,54 +285,63 @@ def processAlgorithm(self, progress):
for out in self.outputs:
commands.append(out.name)
commands.append('"' + out.value + '"')
- for (roiInput, roiFile) in self.roiRasters.items():
- (startX, startY) = (float(self.roiValues[0]),
- float(self.roiValues[1]))
+ for roiInput, roiFile in self.roiRasters.items():
+ startX, startY = float(self.roiValues[0]), float(self.roiValues[1])
sizeX = float(self.roiValues[2]) - startX
sizeY = float(self.roiValues[3]) - startY
helperCommands = [
- 'otbcli_ExtractROI',
- '-in',
- roiInput,
- '-out',
- roiFile,
- '-startx',
- str(startX),
- '-starty',
- str(startY),
- '-sizex',
- str(sizeX),
- '-sizey',
- str(sizeY),
- ]
+ "otbcli_ExtractROI",
+ "-in", roiInput,
+ "-out", roiFile,
+ "-startx", str(startX),
+ "-starty", str(startY),
+ "-sizex", str(sizeX),
+ "-sizey", str(sizeY)]
ProcessingLog.addToLog(ProcessingLog.LOG_INFO, helperCommands)
progress.setCommand(helperCommands)
OTBUtils.executeOtb(helperCommands, progress)
if self.roiRasters:
supportRaster = self.roiRasters.itervalues().next()
- for (roiInput, roiFile) in self.roiVectors.items():
+ for roiInput, roiFile in self.roiVectors.items():
helperCommands = [
- 'otbcli_VectorDataExtractROIApplication',
- '-vd.in',
- roiInput,
- '-io.in',
- supportRaster,
- '-io.out',
- roiFile,
- '-elev.dem.path',
- OTBUtils.otbSRTMPath(),
- ]
+ "otbcli_VectorDataExtractROIApplication",
+ "-vd.in", roiInput,
+ "-io.in", supportRaster,
+ "-io.out", roiFile,
+ "-elev.dem.path", OTBUtils.otbSRTMPath()]
ProcessingLog.addToLog(ProcessingLog.LOG_INFO, helperCommands)
progress.setCommand(helperCommands)
OTBUtils.executeOtb(helperCommands, progress)
loglines = []
- loglines.append('OTB execution command')
+ loglines.append("OTB execution command")
for line in commands:
loglines.append(line)
progress.setCommand(line)
ProcessingLog.addToLog(ProcessingLog.LOG_INFO, loglines)
+ import processing.otb.OTBSpecific_XMLLoading
+ module = processing.otb.OTBSpecific_XMLLoading
+
+ found = False
+ if 'adapt%s' % self.appkey in dir(module):
+ found = True
+ commands = getattr(module, 'adapt%s' % self.appkey)(commands)
+ else:
+ the_key = 'adapt%s' % self.appkey
+ if '-' in the_key:
+ base_key = the_key.split("-")[0]
+ if base_key in dir(module):
+ found = True
+ commands = getattr(module, base_key)(commands)
+
+ if not found:
+ ProcessingLog.addToLog(ProcessingLog.LOG_INFO, "Adapter for %s not found" % the_key)
+ frames = inspect.getouterframes(inspect.currentframe())[1:]
+ for a_frame in frames:
+ frame,filename,line_number,function_name,lines,index = a_frame
+ ProcessingLog.addToLog(ProcessingLog.LOG_INFO, "%s %s %s %s %s %s" % (frame,filename,line_number,function_name,lines,index))
+
OTBUtils.executeOtb(commands, progress)
diff --git a/python/plugins/processing/otb/OTBAlgorithmProvider.py b/python/plugins/processing/otb/OTBAlgorithmProvider.py
index f03ac6b72385..e2f0fed9a84e 100644
--- a/python/plugins/processing/otb/OTBAlgorithmProvider.py
+++ b/python/plugins/processing/otb/OTBAlgorithmProvider.py
@@ -6,7 +6,12 @@
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
+ (C) 2013 by CS Systemes d'information
Email : volayaf at gmail dot com
+ otb at c-s dot fr
+ Contributors : Victor Olaya
+ Julien Malik - Changing the way to load algorithms : loading from xml
+ Oscar Picas - Changing the way to load algorithms : loading from xml
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
@@ -20,19 +25,16 @@
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
-
# This will get replaced with a git SHA1 when you do a git archive
-
__revision__ = '$Format:%H$'
import os
-from PyQt4.QtGui import *
+import PyQt4.QtGui
from processing.core.AlgorithmProvider import AlgorithmProvider
from processing.core.ProcessingConfig import ProcessingConfig, Setting
-from processing.core.ProcessingLog import ProcessingLog
from processing.otb.OTBUtils import OTBUtils
from processing.otb.OTBAlgorithm import OTBAlgorithm
-
+from processing.core.ProcessingLog import ProcessingLog
class OTBAlgorithmProvider(AlgorithmProvider):
@@ -41,14 +43,15 @@ def __init__(self):
self.activate = True
self.createAlgsList()
+
def getDescription(self):
- return 'Orfeo Toolbox (Image analysis)'
+ return "Orfeo Toolbox (Image analysis)"
def getName(self):
- return 'otb'
+ return "otb"
def getIcon(self):
- return QIcon(os.path.dirname(__file__) + '/../images/otb.png')
+ return PyQt4.QtGui.QIcon(os.path.dirname(__file__) + "/../images/otb.png")
def _loadAlgorithms(self):
self.algs = self.preloadedAlgs
@@ -57,36 +60,24 @@ def createAlgsList(self):
self.preloadedAlgs = []
folder = OTBUtils.otbDescriptionPath()
for descriptionFile in os.listdir(folder):
- if descriptionFile.endswith('txt'):
+ if descriptionFile.endswith("xml"):
try:
alg = OTBAlgorithm(os.path.join(folder, descriptionFile))
- if alg.name.strip() != '':
+
+ if alg.name.strip() != "":
self.preloadedAlgs.append(alg)
else:
- ProcessingLog.addToLog(ProcessingLog.LOG_ERROR,
- 'Could not open OTB algorithm: '
- + descriptionFile)
- except Exception, e:
- ProcessingLog.addToLog(ProcessingLog.LOG_ERROR,
- 'Could not open OTB algorithm: ' + descriptionFile)
+ ProcessingLog.addToLog(ProcessingLog.LOG_ERROR, "Could not open OTB algorithm: " + descriptionFile)
+ except Exception,e:
+ ProcessingLog.addToLog(ProcessingLog.LOG_ERROR, "Could not open OTB algorithm: " + descriptionFile)
+
def initializeSettings(self):
AlgorithmProvider.initializeSettings(self)
- ProcessingConfig.addSetting(Setting(self.getDescription(),
- OTBUtils.OTB_FOLDER,
- 'OTB command line tools folder',
- OTBUtils.otbPath()))
- ProcessingConfig.addSetting(Setting(self.getDescription(),
- OTBUtils.OTB_LIB_FOLDER,
- 'OTB applications folder',
- OTBUtils.otbLibPath()))
- ProcessingConfig.addSetting(Setting(self.getDescription(),
- OTBUtils.OTB_SRTM_FOLDER,
- 'SRTM tiles folder',
- OTBUtils.otbSRTMPath()))
- ProcessingConfig.addSetting(Setting(self.getDescription(),
- OTBUtils.OTB_GEOID_FILE, 'Geoid file',
- OTBUtils.otbGeoidPath()))
+ ProcessingConfig.addSetting(Setting(self.getDescription(), OTBUtils.OTB_FOLDER, "OTB command line tools folder", OTBUtils.otbPath()))
+ ProcessingConfig.addSetting(Setting(self.getDescription(), OTBUtils.OTB_LIB_FOLDER, "OTB applications folder", OTBUtils.otbLibPath()))
+ ProcessingConfig.addSetting(Setting(self.getDescription(), OTBUtils.OTB_SRTM_FOLDER, "SRTM tiles folder", OTBUtils.otbSRTMPath()))
+ ProcessingConfig.addSetting(Setting(self.getDescription(), OTBUtils.OTB_GEOID_FILE, "Geoid file", OTBUtils.otbGeoidPath()))
def unload(self):
AlgorithmProvider.unload(self)
diff --git a/python/plugins/processing/otb/OTBSpecific_XMLLoading.py b/python/plugins/processing/otb/OTBSpecific_XMLLoading.py
new file mode 100644
index 000000000000..583a5930a37a
--- /dev/null
+++ b/python/plugins/processing/otb/OTBSpecific_XMLLoading.py
@@ -0,0 +1,326 @@
+# -*- coding: utf-8 -*-
+
+"""
+***************************************************************************
+ OTBSpecific_XMLLoading.py
+ ---------------------
+ Date : 11-12-13
+ Copyright : (C) 2013 by CS Systemes d'information (CS SI)
+ Email : otb at c-s dot fr (CS SI)
+ Contributors : Julien Malik (CS SI) - creation of otbspecific
+ Oscar Picas (CS SI) -
+ Alexia Mondot (CS SI) - split otbspecific into 2 files
+ add functions
+***************************************************************************
+* *
+* This program is free software; you can redistribute it and/or modify *
+* it under the terms of the GNU General Public License as published by *
+* the Free Software Foundation; either version 2 of the License, or *
+* (at your option) any later version. *
+* *
+***************************************************************************
+
+When an OTB algorithm is run, this file allows adapting the user parameters to fit the OTB application.
+
+Most of the following functions are as follows:
+ adaptNameOfTheOTBApplication(commands_list)
+The command list is a list of all parameters of the given algorithm with all user values.
+"""
+
+
+__author__ = 'Julien Malik, Oscar Picas, Alexia Mondot'
+__date__ = 'December 2013'
+__copyright__ = '(C) 2013, CS Systemes d\'information (CS SI)'
+# This will get replaced with a git SHA1 when you do a git archive
+__revision__ = '$Format:%H$'
+__version__ = "3.8"
+
+import os
+
+try:
+ import processing
+except ImportError, e:
+ raise Exception("Processing must be installed and available in PYTHONPATH")
+
+from processing.core.ProcessingLog import ProcessingLog
+from processing.core.ProcessingConfig import ProcessingConfig
+
+from processing.otb.OTBUtils import *
+
+
+
+def adaptBinaryMorphologicalOperation(commands_list):
+ val = commands_list[commands_list.index("-filter") + 1]
+
+ def replace_dilate(param, value):
+ if ".dilate" in str(param):
+ return param.replace("dilate", value)
+ else:
+ return param
+
+ import functools
+ com_list = map(functools.partial(replace_dilate, value=val), commands_list)
+
+ val = com_list[com_list.index("-structype.ball.xradius") + 1]
+
+ pos = com_list.index("-structype.ball.xradius") + 2
+
+ com_list.insert(pos, '-structype.ball.yradius')
+ com_list.insert(pos + 1, val)
+
+ return com_list
+
+
+def adaptEdgeExtraction(commands_list):
+ """
+ Add filter.touzi.yradius with the same value as filter.touzi.xradius
+ """
+ val = commands_list[commands_list.index("-filter") + 1]
+ if val == 'touzi':
+ bval = commands_list[commands_list.index("-filter.touzi.xradius") + 1]
+ pos = commands_list.index("-filter.touzi.xradius") + 2
+ commands_list.insert(pos, "-filter.touzi.yradius")
+ commands_list.insert(pos + 1, bval)
+ return commands_list
+
+
+def adaptGrayScaleMorphologicalOperation(commands_list):
+ """
+ Add structype.ball.yradius with the same value as structype.ball.xradius (as it is a ball)
+ """
+ val = commands_list[commands_list.index("-structype.ball.xradius") + 1]
+ pos = commands_list.index("-structype.ball.xradius") + 2
+ commands_list.insert(pos, "-structype.ball.yradius")
+ commands_list.insert(pos + 1, val)
+ return commands_list
+
+
+def adaptSplitImage(commands_list):
+ """
+ By default, the extension of the output file is .file. Replace it with ".tif".
+ If no extension given, put ".tif" at the end of the filename.
+ """
+ commands_list2 = []
+ for item in commands_list:
+ if ".file" in item:
+ item = item.replace(".file", ".tif")
+ if item == "-out":
+ index = commands_list.index(item)
+ if not "." in os.path.basename(commands_list[index + 1] ):
+ commands_list[index + 1] = commands_list[index + 1][:-1] + ".tif" + commands_list[index + 1][-1]
+ commands_list2.append(item)
+ return commands_list2
+
+
+def adaptLSMSVectorization(commands_list):
+ """
+ By default, the extension of the output file is .file. Replace it with ".shp".
+ If no extension given, put ".shp" at the end of the filename.
+ """
+ commands_list2 = []
+ for item in commands_list:
+ if ".file" in item:
+ item = item.replace(".file", ".shp")
+ if item == "-out":
+ index = commands_list.index(item)
+ if not "." in os.path.basename(commands_list[index + 1] ):
+ commands_list[index + 1] = commands_list[index + 1][:-1] + ".shp" + commands_list[index + 1][-1]
+ commands_list2.append(item)
+
+ return commands_list2
+
+def adaptComputeImagesStatistics(commands_list):
+ """
+ By default, the extension of the output file is .file. Replace it with ".xml".
+ If no extension given, put ".xml" at the end of the filename.
+ """
+ commands_list2 = []
+ for item in commands_list:
+ if ".file" in item:
+ item = item.replace(".file", ".xml")
+ commands_list2.append(item)
+ if item == "-out":
+ index = commands_list.index(item)
+ if not "." in os.path.basename(commands_list[index + 1] ):
+ commands_list[index + 1] = commands_list[index + 1][:-1] + ".xml" + commands_list[index + 1][-1]
+
+ return commands_list2
+
+
+def adaptKmzExport(commands_list):
+ """
+ By default, the extension of the output file is .file. Replace it with ".kmz".
+ If no extension given, put ".kmz" at the end of the filename.
+ Check geoid file, srtm folder and given elevation and manage arguments.
+ """
+ adaptGeoidSrtm(commands_list)
+ commands_list2 = []
+ for item in commands_list:
+ if ".file" in item:
+ item = item.replace(".file", ".kmz")
+ if item == "-out":
+ index = commands_list.index(item)
+ if not "." in os.path.basename(commands_list[index + 1] ):
+ commands_list[index + 1] = commands_list[index + 1][:-1] + ".kmz" + commands_list[index + 1][-1]
+
+ commands_list2.append(item)
+ return commands_list2
+
+
+def adaptColorMapping(commands_list):
+ """
+ The output of this algorithm must be in uint8.
+ """
+ indexInput = commands_list.index("-out")
+ commands_list[indexInput+1] = commands_list[indexInput+1] + " uint8"
+ return commands_list
+
+
+
+def adaptStereoFramework(commands_list):
+ """
+ Remove parameter and user value instead of giving None.
+ Check geoid file, srtm folder and given elevation and manage arguments.
+ """
+ commands_list2 = commands_list
+ adaptGeoidSrtm(commands_list2)
+ for item in commands_list:
+ if "None" in item:
+ index = commands_list2.index(item)
+ argumentToRemove = commands_list2[index-1]
+ commands_list2.remove(item)
+ commands_list2.remove(argumentToRemove)
+ #commands_list2.append(item)
+ return commands_list2
+
+
+def adaptComputeConfusionMatrix(commands_list):
+ """
+ By default, the extension of the output file is .file. Replace it with ".csv".
+ If no extension given, put ".csv" at the end of the filename.
+ """
+ commands_list2 = []
+ for item in commands_list:
+ if ".file" in item:
+ item = item.replace(".file", ".csv")
+ if item == "-out":
+ index = commands_list.index(item)
+ if not "." in os.path.basename(commands_list[index + 1] ):
+ commands_list[index + 1] = commands_list[index + 1][:-1] + ".csv" + commands_list[index + 1][-1]
+
+ commands_list2.append(item)
+ return commands_list2
+
+
+def adaptRadiometricIndices(commands_list):
+ """
+ Replace each index nickname by its corresponding entry in the following dictionary:
+ indices = {"ndvi" : "Vegetation:NDVI", "tndvi" : "Vegetation:TNDVI", "rvi" : "Vegetation:RVI", "savi" : "Vegetation:SAVI",
+ "tsavi" : "Vegetation:TSAVI", "msavi" : "Vegetation:MSAVI", "msavi2" : "Vegetation:MSAVI2", "gemi" : "Vegetation:GEMI",
+ "ipvi" : "Vegetation:IPVI",
+ "ndwi" : "Water:NDWI", "ndwi2" : "Water:NDWI2", "mndwi" :"Water:MNDWI" , "ndpi" : "Water:NDPI",
+ "ndti" : "Water:NDTI",
+ "ri" : "Soil:RI", "ci" : "Soil:CI", "bi" : "Soil:BI", "bi2" : "Soil:BI2"}
+ """
+# "laindvilog" : , "lairefl" : , "laindviformo" : ,
+ indices = {"ndvi" : "Vegetation:NDVI", "tndvi" : "Vegetation:TNDVI", "rvi" : "Vegetation:RVI", "savi" : "Vegetation:SAVI",
+ "tsavi" : "Vegetation:TSAVI", "msavi" : "Vegetation:MSAVI", "msavi2" : "Vegetation:MSAVI2", "gemi" : "Vegetation:GEMI",
+ "ipvi" : "Vegetation:IPVI",
+ "ndwi" : "Water:NDWI", "ndwi2" : "Water:NDWI2", "mndwi" :"Water:MNDWI" , "ndpi" : "Water:NDPI",
+ "ndti" : "Water:NDTI",
+ "ri" : "Soil:RI", "ci" : "Soil:CI", "bi" : "Soil:BI", "bi2" : "Soil:BI2"}
+ for item in commands_list:
+ if item in indices:
+ commands_list[commands_list.index(item)] = indices[item]
+ return commands_list
+
+
+def adaptDisparityMapToElevationMap(commands_list):
+ """
+ Check geoid file, srtm folder and given elevation and manage arguments.
+ """
+ adaptGeoidSrtm(commands_list)
+ return commands_list
+
+
+def adaptConnectedComponentSegmentation(commands_list):
+ """
+ Remove parameter and user value instead of giving None.
+ """
+ commands_list2 = commands_list
+ adaptGeoidSrtm(commands_list2)
+ for item in commands_list:
+ if "None" in item:
+ index = commands_list2.index(item)
+ argumentToRemove = commands_list2[index-1]
+ commands_list2.remove(item)
+ commands_list2.remove(argumentToRemove)
+ #commands_list2.append(item)
+ return commands_list2
+
+
+def adaptSuperimpose(commands_list):
+ """
+ Check geoid file, srtm folder and given elevation and manage arguments.
+ """
+ adaptGeoidSrtm(commands_list)
+ return commands_list
+
+
+def adaptOrthoRectification(commands_list):
+ """
+ Check geoid file, srtm folder and given elevation and manage arguments.
+ """
+ adaptGeoidSrtm(commands_list)
+ return commands_list
+
+
+def adaptExtractROI(commands_list):
+ """
+ Check geoid file, srtm folder and given elevation and manage arguments.
+ """
+ adaptGeoidSrtm(commands_list)
+ return commands_list
+
+
+def adaptTrainImagesClassifier(commands_list):
+ """
+ Check geoid file, srtm folder and given elevation and manage arguments.
+ """
+ adaptGeoidSrtm(commands_list)
+ return commands_list
+
+def adaptGeoidSrtm(commands_list):
+ """
+ Check geoid file, srtm folder and given elevation and manage arguments.
+ """
+ srtm, geoid = ckeckGeoidSrtmSettings()
+
+
+ if srtm :
+ if commands_list[0].endswith("ExtractROI") :
+ commands_list.append("-mode.fit.elev.dem")
+ commands_list.append(srtm)
+ else :
+ commands_list.append("-elev.dem")
+ commands_list.append(srtm)
+ if geoid :
+ if commands_list[0].endswith("ExtractROI") :
+ commands_list.append("-mode.fit.elev.geoid")
+ commands_list.append(geoid)
+ else :
+ commands_list.append("-elev.geoid")
+ commands_list.append(geoid)
+
+
+
+def ckeckGeoidSrtmSettings():
+ folder = ProcessingConfig.getSetting(OTBUtils.OTB_SRTM_FOLDER)
+ if folder == None:
+ folder =""
+
+ filepath = ProcessingConfig.getSetting(OTBUtils.OTB_GEOID_FILE)
+ if filepath == None:
+ filepath =""
+
+ return folder, filepath
diff --git a/python/plugins/processing/otb/OTBUtils.py b/python/plugins/processing/otb/OTBUtils.py
index 3603d3c4575a..15678c2d820e 100644
--- a/python/plugins/processing/otb/OTBUtils.py
+++ b/python/plugins/processing/otb/OTBUtils.py
@@ -6,7 +6,12 @@
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
+ (C) 2013 by CS Systemes d'information (CS SI)
Email : volayaf at gmail dot com
+ otb at c-s dot fr (CS SI)
+ Contributors : Victor Olaya
+ Julien Malik, Oscar Picas (CS SI) - add functions to manage xml tree
+ Alexia Mondot (CS SI) - add a trick for OTBApplication SplitImages
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
@@ -20,78 +25,80 @@
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
-
# This will get replaced with a git SHA1 when you do a git archive
-
__revision__ = '$Format:%H$'
import os
-import subprocess
+import glob
from qgis.core import QgsApplication
+import subprocess
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.ProcessingLog import ProcessingLog
-from processing.tools.system import *
+from processing.core.ProcessingUtils import ProcessingUtils
+import logging
+import xml.etree.ElementTree as ET
+import traceback
+from processing.core.QGisLayers import QGisLayers
+import qgis.core
+import PyQt4.QtGui
class OTBUtils:
- OTB_FOLDER = 'OTB_FOLDER'
- OTB_LIB_FOLDER = 'OTB_LIB_FOLDER'
- OTB_SRTM_FOLDER = 'OTB_SRTM_FOLDER'
- OTB_GEOID_FILE = 'OTB_GEOID_FILE'
+ OTB_FOLDER = "OTB_FOLDER"
+ OTB_LIB_FOLDER = "OTB_LIB_FOLDER"
+ OTB_SRTM_FOLDER = "OTB_SRTM_FOLDER"
+ OTB_GEOID_FILE = "OTB_GEOID_FILE"
@staticmethod
def otbPath():
folder = ProcessingConfig.getSetting(OTBUtils.OTB_FOLDER)
- if folder is None:
- folder = ''
-
- # Try to configure the path automatically
- if isMac():
- testfolder = os.path.join(str(QgsApplication.prefixPath()),
- 'bin')
- if os.path.exists(os.path.join(testfolder, 'otbcli')):
+ if folder == None:
+ folder = ""
+ #try to configure the path automatically
+ if ProcessingUtils.isMac():
+ testfolder = os.path.join(str(QgsApplication.prefixPath()), "bin")
+ if os.path.exists(os.path.join(testfolder, "otbcli")):
folder = testfolder
else:
- testfolder = '/usr/local/bin'
- if os.path.exists(os.path.join(testfolder, 'otbcli')):
+ testfolder = "/usr/local/bin"
+ if os.path.exists(os.path.join(testfolder, "otbcli")):
folder = testfolder
- elif isWindows():
+ elif ProcessingUtils.isWindows():
testfolder = os.path.dirname(str(QgsApplication.prefixPath()))
testfolder = os.path.dirname(testfolder)
- testfolder = os.path.join(testfolder, 'bin')
- path = os.path.join(testfolder, 'otbcli.bat')
+ testfolder = os.path.join(testfolder, "bin")
+ path = os.path.join(testfolder, "otbcli.bat")
if os.path.exists(path):
folder = testfolder
else:
- testfolder = '/usr/bin'
- if os.path.exists(os.path.join(testfolder, 'otbcli')):
+ testfolder = "/usr/bin"
+ if os.path.exists(os.path.join(testfolder, "otbcli")):
folder = testfolder
return folder
@staticmethod
def otbLibPath():
folder = ProcessingConfig.getSetting(OTBUtils.OTB_LIB_FOLDER)
- if folder is None:
- folder = ''
- # Try to configure the path automatically
- if isMac():
- testfolder = os.path.join(str(QgsApplication.prefixPath()),
- 'lib/otb/applications')
+ if folder == None:
+ folder =""
+ #try to configure the path automatically
+ if ProcessingUtils.isMac():
+ testfolder = os.path.join(str(QgsApplication.prefixPath()), "lib/otb/applications")
if os.path.exists(testfolder):
folder = testfolder
else:
- testfolder = '/usr/local/lib/otb/applications'
+ testfolder = "/usr/local/lib/otb/applications"
if os.path.exists(testfolder):
folder = testfolder
- elif isWindows():
+ elif ProcessingUtils.isWindows():
testfolder = os.path.dirname(str(QgsApplication.prefixPath()))
- testfolder = os.path.join(testfolder, 'orfeotoolbox')
- testfolder = os.path.join(testfolder, 'applications')
+ testfolder = os.path.join(testfolder, "orfeotoolbox")
+ testfolder = os.path.join(testfolder, "applications")
if os.path.exists(testfolder):
folder = testfolder
else:
- testfolder = '/usr/lib/otb/applications'
+ testfolder = "/usr/lib/otb/applications"
if os.path.exists(testfolder):
folder = testfolder
return folder
@@ -99,42 +106,135 @@ def otbLibPath():
@staticmethod
def otbSRTMPath():
folder = ProcessingConfig.getSetting(OTBUtils.OTB_SRTM_FOLDER)
- if folder is None:
- folder = ''
+ if folder == None:
+ folder =""
return folder
@staticmethod
def otbGeoidPath():
filepath = ProcessingConfig.getSetting(OTBUtils.OTB_GEOID_FILE)
- if filepath is None:
- filepath = ''
+ if filepath == None:
+ filepath =""
return filepath
@staticmethod
def otbDescriptionPath():
- return os.path.join(os.path.dirname(__file__), 'description')
+ return os.path.join(os.path.dirname(__file__), "description")
@staticmethod
def executeOtb(commands, progress):
loglines = []
- loglines.append('OTB execution console output')
+ loglines.append("OTB execution console output")
os.putenv('ITK_AUTOLOAD_PATH', OTBUtils.otbLibPath())
fused_command = ''.join(['"%s" ' % c for c in commands])
- proc = subprocess.Popen(
- fused_command,
- shell=True,
- stdout=subprocess.PIPE,
- stdin=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- universal_newlines=True,
- ).stdout
- for line in iter(proc.readline, ''):
- if '[*' in line:
- idx = line.find('[*')
- perc = int(line[idx - 4:idx - 2].strip(' '))
+ proc = subprocess.Popen(fused_command, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE,stderr=subprocess.STDOUT, universal_newlines=True).stdout
+ for line in iter(proc.readline, ""):
+ if "[*" in line:
+ idx = line.find("[*")
+ perc = int(line[idx-4:idx-2].strip(" "))
if perc != 0:
progress.setPercentage(perc)
else:
loglines.append(line)
progress.setConsoleInfo(line)
+
ProcessingLog.addToLog(ProcessingLog.LOG_INFO, loglines)
+
+
+
+def get_choices_of(doc, parameter):
+ choices = []
+ try:
+ t5 = [item for item in doc.findall('.//parameter') if item.find('key').text == parameter]
+ choices = [item.text for item in t5[0].findall('options/choices/choice')]
+ except:
+ logger = logging.getLogger('OTBGenerator')
+ logger.warning(traceback.format_exc())
+ return choices
+
+def remove_dependant_choices(doc, parameter, choice):
+ choices = get_choices_of(doc, parameter)
+ choices.remove(choice)
+ for a_choice in choices:
+ t4 = [item for item in doc.findall('.//parameter') if '.%s' % a_choice in item.find('key').text]
+ for t5 in t4:
+ doc.remove(t5)
+
+def renameValueField(doc, textitem, field, newValue):
+ t4 = [item for item in doc.findall('.//parameter') if item.find('key').text == textitem]
+ for t5 in t4:
+ t5.find(field).text = newValue
+
+
+def remove_independant_choices(doc, parameter, choice):
+ choices = []
+ choices.append(choice)
+ for a_choice in choices:
+ t4 = [item for item in doc.findall('.//parameter') if '.%s' % a_choice in item.find('key').text]
+ for t5 in t4:
+ doc.remove(t5)
+
+def remove_parameter_by_key(doc, parameter):
+ t4 = [item for item in doc.findall('.//parameter') if item.find('key').text == parameter]
+ for t5 in t4:
+ doc.remove(t5)
+
+def remove_other_choices(doc, parameter, choice):
+ t5 = [item for item in doc.findall('.//parameter') if item.find('key').text == parameter]
+ if len(t5) > 0:
+ choices = [item for item in t5[0].findall('options/choices/choice') if item.text != choice]
+ choice_root = t5[0].findall('options/choices')[0]
+ for a_choice in choices:
+ choice_root.remove(a_choice)
+
+def remove_choice(doc, parameter, choice):
+ t5 = [item for item in doc.findall('.//parameter') if item.find('key').text == parameter]
+ if len(t5) > 0:
+ choices = [item for item in t5[0].findall('options/choices/choice') if item.text == choice]
+ choice_root = t5[0].findall('options/choices')[0]
+ for a_choice in choices:
+ choice_root.remove(a_choice)
+
+def split_by_choice(doc, parameter):
+ """
+ splits the given doc into several docs according to the given parameter
+ returns a dictionary of documents
+ """
+ result = {}
+ choices = get_choices_of(doc, parameter)
+ import copy
+ for choice in choices:
+ #creates a new copy of the document
+ working_copy = copy.deepcopy(doc)
+ remove_dependant_choices(working_copy, parameter, choice)
+ #remove all other choices except the current one
+ remove_other_choices(working_copy, parameter, choice)
+ #set a new name according to the choice
+ old_app_name = working_copy.find('key').text
+ working_copy.find('key').text = '%s-%s' % (old_app_name, choice)
+ old_longname = working_copy.find('longname').text
+ working_copy.find('longname').text = '%s (%s)' % (old_app_name, choice)
+ #add it to the dictionary
+ result[choice] = working_copy
+ return result
+
+def remove_parameter_by_criteria(doc, criteria):
+ t4 = [item for item in doc.findall('./parameter') if criteria(item)]
+ for t5 in t4:
+ doc.getroot().remove(t5)
+
+def defaultWrite(available_app, original_dom_document):
+ fh = open("description/%s.xml" % available_app, "w")
+ the_root = original_dom_document
+ logger = logging.getLogger('OTBGenerator')
+ ET.ElementTree(the_root).write(fh)
+ fh.close()
+
+def defaultSplit(available_app, original_dom_document, parameter):
+ the_root = original_dom_document
+ splitted = split_by_choice(the_root, parameter)
+ the_list = []
+ for key in splitted:
+ defaultWrite('%s-%s' % (available_app, key), splitted[key])
+ the_list.append(splitted[key])
+ return the_list
diff --git a/python/plugins/processing/otb/description/BandMath.txt b/python/plugins/processing/otb/description/BandMath.txt
deleted file mode 100644
index a3f13deb6799..000000000000
--- a/python/plugins/processing/otb/description/BandMath.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-BandMath
-otbcli_BandMath
-Band Math
-Miscellaneous
-ParameterMultipleInput|-il|Input image list|3|False
-OutputRaster|-out|Output Image
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
-ParameterString|-exp|Expression|
diff --git a/python/plugins/processing/otb/description/BandMath.xml b/python/plugins/processing/otb/description/BandMath.xml
new file mode 100644
index 000000000000..ca4a9359939d
--- /dev/null
+++ b/python/plugins/processing/otb/description/BandMath.xml
@@ -0,0 +1,41 @@
+
+ BandMath
+ otbcli_BandMath
+ Band Math
+ Miscellaneous
+ Perform a mathematical operation on monoband images
+
+ ParameterMultipleInput
+ il
+ Input image list
+ Image list to perform computation on.
+
+ False
+
+
+ OutputRaster
+ out
+ Output Image
+ Output image.
+
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterString
+ exp
+ Expression
+ The mathematical expression to apply.
+Use im1b1 for the first band, im1b2 for the second one...
+
+
+ False
+
+
diff --git a/python/plugins/processing/otb/description/BinaryMorphologicalOperation-closing.xml b/python/plugins/processing/otb/description/BinaryMorphologicalOperation-closing.xml
new file mode 100644
index 000000000000..9593d1af4d02
--- /dev/null
+++ b/python/plugins/processing/otb/description/BinaryMorphologicalOperation-closing.xml
@@ -0,0 +1,72 @@
+
+ BinaryMorphologicalOperation-closing
+ otbcli_BinaryMorphologicalOperation
+ BinaryMorphologicalOperation (closing)
+ Feature Extraction
+ Performs morphological operations on an input image channel
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to be filtered.
+ False
+
+
+ OutputRaster
+ out
+ Feature Output Image
+ Output image containing the filtered output image.
+
+
+
+ ParameterNumber
+ channel
+ Selected Channel
+ The selected channel index
+
+
+ 1
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterSelection
+ structype
+ Structuring Element Type
+ Choice of the structuring element type
+
+
+ ball
+
+
+ 0
+
+
+ ParameterNumber
+ structype.ball.xradius
+ The Structuring Element Radius
+ The Structuring Element Radius
+
+
+ 5
+
+
+ ParameterSelection
+ filter
+ Morphological Operation
+ Choice of the morphological operation
+
+
+ closing
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/BinaryMorphologicalOperation-dilate.xml b/python/plugins/processing/otb/description/BinaryMorphologicalOperation-dilate.xml
new file mode 100644
index 000000000000..1e608f06251f
--- /dev/null
+++ b/python/plugins/processing/otb/description/BinaryMorphologicalOperation-dilate.xml
@@ -0,0 +1,90 @@
+
+ BinaryMorphologicalOperation-dilate
+ otbcli_BinaryMorphologicalOperation
+ BinaryMorphologicalOperation (dilate)
+ Feature Extraction
+ Performs morphological operations on an input image channel
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to be filtered.
+ False
+
+
+ OutputRaster
+ out
+ Feature Output Image
+ Output image containing the filtered output image.
+
+
+
+ ParameterNumber
+ channel
+ Selected Channel
+ The selected channel index
+
+
+ 1
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterSelection
+ structype
+ Structuring Element Type
+ Choice of the structuring element type
+
+
+ ball
+
+
+ 0
+
+
+ ParameterNumber
+ structype.ball.xradius
+ The Structuring Element Radius
+ The Structuring Element Radius
+
+
+ 5
+
+
+ ParameterSelection
+ filter
+ Morphological Operation
+ Choice of the morphological operation
+
+
+ dilate
+
+
+ 0
+
+
+ ParameterNumber
+ filter.dilate.foreval
+ Foreground Value
+ The Foreground Value
+
+
+ 1
+
+
+ ParameterNumber
+ filter.dilate.backval
+ Background Value
+ The Background Value
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/BinaryMorphologicalOperation-erode.xml b/python/plugins/processing/otb/description/BinaryMorphologicalOperation-erode.xml
new file mode 100644
index 000000000000..e7ef87a93fcb
--- /dev/null
+++ b/python/plugins/processing/otb/description/BinaryMorphologicalOperation-erode.xml
@@ -0,0 +1,72 @@
+
+ BinaryMorphologicalOperation-erode
+ otbcli_BinaryMorphologicalOperation
+ BinaryMorphologicalOperation (erode)
+ Feature Extraction
+ Performs morphological operations on an input image channel
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to be filtered.
+ False
+
+
+ OutputRaster
+ out
+ Feature Output Image
+ Output image containing the filtered output image.
+
+
+
+ ParameterNumber
+ channel
+ Selected Channel
+ The selected channel index
+
+
+ 1
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterSelection
+ structype
+ Structuring Element Type
+ Choice of the structuring element type
+
+
+ ball
+
+
+ 0
+
+
+ ParameterNumber
+ structype.ball.xradius
+ The Structuring Element Radius
+ The Structuring Element Radius
+
+
+ 5
+
+
+ ParameterSelection
+ filter
+ Morphological Operation
+ Choice of the morphological operation
+
+
+ erode
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/BinaryMorphologicalOperation-opening.xml b/python/plugins/processing/otb/description/BinaryMorphologicalOperation-opening.xml
new file mode 100644
index 000000000000..92d8668cdb51
--- /dev/null
+++ b/python/plugins/processing/otb/description/BinaryMorphologicalOperation-opening.xml
@@ -0,0 +1,72 @@
+
+ BinaryMorphologicalOperation-opening
+ otbcli_BinaryMorphologicalOperation
+ BinaryMorphologicalOperation (opening)
+ Feature Extraction
+ Performs morphological operations on an input image channel
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to be filtered.
+ False
+
+
+ OutputRaster
+ out
+ Feature Output Image
+ Output image containing the filtered output image.
+
+
+
+ ParameterNumber
+ channel
+ Selected Channel
+ The selected channel index
+
+
+ 1
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterSelection
+ structype
+ Structuring Element Type
+ Choice of the structuring element type
+
+
+ ball
+
+
+ 0
+
+
+ ParameterNumber
+ structype.ball.xradius
+ The Structuring Element Radius
+ The Structuring Element Radius
+
+
+ 5
+
+
+ ParameterSelection
+ filter
+ Morphological Operation
+ Choice of the morphological operation
+
+
+ opening
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/BinaryMorphologicalOperation.txt b/python/plugins/processing/otb/description/BinaryMorphologicalOperation.txt
deleted file mode 100644
index 4460123e52fe..000000000000
--- a/python/plugins/processing/otb/description/BinaryMorphologicalOperation.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Binary Morphological Operation
-otbcli_BinaryMorphologicalOperation
-Binary Morphological Operation
-Image Filtering
-ParameterRaster|-in|Input Image|False
-OutputRaster|-out|Output Image
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
-ParameterNumber|-channel|Selected Channel|None|None|1
-ParameterSelection|-structype|Structuring Element Type|ball;cross|0
-ParameterNumber|-structype.ball.xradius|The Structuring Element X Radius|None|None|5
-ParameterNumber|-structype.ball.yradius|The Structuring Element Y Radius|None|None|5
-ParameterSelection|-filter|Morphological Operation|dilate;erode;opening;closing|0
-ParameterNumber|-filter.dilate.foreval|Dilate Foreground Value|None|None|1
-ParameterNumber|-filter.dilate.backval|Dilate Background Value Value|None|None|0
-ParameterNumber|-filter.erode.foreval|Erode Foreground Value|None|None|1
-ParameterNumber|-filter.erode.backval|Erode Background Value|None|None|0
-ParameterNumber|-filter.opening.foreval|Opening Foreground Value|None|None|1
-ParameterNumber|-filter.opening.backval|Opening Background Value|None|None|0
-ParameterNumber|-filter.closing.foreval|Closing Foreground Value|None|None|1
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/BlockMatching.txt b/python/plugins/processing/otb/description/BlockMatching.txt
deleted file mode 100644
index fd76675c32e2..000000000000
--- a/python/plugins/processing/otb/description/BlockMatching.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-BlockMatching
-otbcli_BlockMatching
- Pixel-wise Block-Matching
-Stereo
-ParameterRaster|-io.inleft|Left input image|False
-ParameterRaster|-io.inright|Right input image|False
-OutputRaster|-io.out|The output disparity map
-OutputRaster|-io.outmask|The output mask corresponding to all criterions
-ParameterBoolean|-io.outmetric|Output optimal metric values as well|
-ParameterRaster|-mask.inleft|Discard left pixels from mask image|True
-ParameterRaster|-mask.inright|Discard right pixels from mask image|True
-ParameterNumber|-mask.nodata|Discard pixels with no-data value|None|None|0.0
-ParameterNumber|-mask.variancet|Discard pixels with low local variance|None|None|100.0
-ParameterSelection|-bm.metric|Block-matching metric|ssd;ncc;lp|0
-ParameterNumber|-bm.metric.lp.p|p value|None|None|1.0
-ParameterNumber|-bm.radius|Radius of blocks|None|None|3
-ParameterNumber|-bm.minhd|Minimum horizontal disparity|None|None|0
-ParameterNumber|-bm.maxhd|Maximum horizontal disparity|None|None|0
-ParameterNumber|-bm.minvd|Minimum vertical disparity|None|None|0
-ParameterNumber|-bm.maxvd|Maximum vertical disparity|None|None|0
-ParameterSelection|-bm.subpixel|Sub-pixel interpolation|none;parabolic;triangular;dichotomy|0
-ParameterNumber|-bm.medianfilter.radius|Radius|None|None|0
-ParameterNumber|-bm.medianfilter.incoherence|Incoherence threshold|None|None|0.0
-ParameterSelection|-bm.initdisp|Initial disparities|none;uniform;maps|0
-ParameterNumber|-bm.initdisp.uniform.hdisp|Horizontal initial disparity|None|None|0
-ParameterNumber|-bm.initdisp.uniform.vdisp|Vertical initial disparity|None|None|0
-ParameterNumber|-bm.initdisp.uniform.hrad|Horizontal exploration radius|None|None|0
-ParameterNumber|-bm.initdisp.uniform.vrad|Vertical exploration radius|None|None|0
-ParameterRaster|-bm.initdisp.maps.hmap|Horizontal initial disparity map|False
-ParameterRaster|-bm.initdisp.maps.vmap|Vertical initial disparity map|False
-ParameterNumber|-bm.initdisp.maps.hrad|Horizontal exploration radius|None|None|0
-ParameterNumber|-bm.initdisp.maps.vrad|Vertical exploration radius|None|None|0
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
diff --git a/python/plugins/processing/otb/description/BundleToPerfectSensor.txt b/python/plugins/processing/otb/description/BundleToPerfectSensor.txt
deleted file mode 100644
index c1ee11fe9a9a..000000000000
--- a/python/plugins/processing/otb/description/BundleToPerfectSensor.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-BundleToPerfectSensor
-otbcli_BundleToPerfectSensor
-Bundle to perfect sensor
-Geometry
-ParameterRaster|-inp|Input PAN Image|False
-ParameterRaster|-inxs|Input XS Image|False
-ParameterSelection|-elev|Elevation management|dem;average|1
-ParameterFile|-elev.dem.path|DEM directory|
-ParameterFile|-elev.dem.geoid|Geoid File||
-ParameterNumber|-elev.average.value|Average Elevation|None|None|0.0
-ParameterNumber|-lms|Spacing of the deformation field|None|None|0.0
-OutputRaster|-out|Output image
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
diff --git a/python/plugins/processing/otb/description/ClassificationMapRegularization.xml b/python/plugins/processing/otb/description/ClassificationMapRegularization.xml
new file mode 100644
index 000000000000..cc586e3a27a5
--- /dev/null
+++ b/python/plugins/processing/otb/description/ClassificationMapRegularization.xml
@@ -0,0 +1,64 @@
+
+ ClassificationMapRegularization
+ otbcli_ClassificationMapRegularization
+ Classification Map Regularization
+ Learning
+ Filters the input labeled image using Majority Voting in a ball shaped neighborhood.
+
+ ParameterRaster
+ io.in
+ Input classification image
+ The input labeled image to regularize.
+ False
+
+
+ OutputRaster
+ io.out
+ Output regularized image
+ The output regularized labeled image.
+
+
+
+ ParameterNumber
+ ip.radius
+ Structuring element radius (in pixels)
+ The radius of the ball shaped structuring element (expressed in pixels). By default, 'ip.radius = 1 pixel'.
+
+
+ 1
+
+
+ ParameterBoolean
+ ip.suvbool
+ Multiple majority: Undecided(X)/Original
+ Pixels with more than 1 majority class are marked as Undecided if this parameter is checked (true), or keep their Original labels otherwise (false). Please note that the Undecided value must be different from existing labels in the input labeled image. By default, 'ip.suvbool = false'.
+ True
+
+
+ ParameterNumber
+ ip.nodatalabel
+ Label for the NoData class
+ Label for the NoData class. Such input pixels keep their NoData label in the output image. By default, 'ip.nodatalabel = 0'.
+
+
+ 0
+
+
+ ParameterNumber
+ ip.undecidedlabel
+ Label for the Undecided class
+ Label for the Undecided class. By default, 'ip.undecidedlabel = 0'.
+
+
+ 0
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
diff --git a/python/plugins/processing/otb/description/ClassificationRegularization.txt b/python/plugins/processing/otb/description/ClassificationRegularization.txt
deleted file mode 100644
index c57ede531431..000000000000
--- a/python/plugins/processing/otb/description/ClassificationRegularization.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Classification Regularization
-otbcli_ClassificationMapRegularization
-Classification Regularization
-Learning
-ParameterRaster|-io.in|Input Image|False
-OutputRaster|-io.out|Output Image
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
-ParameterNumber|-ip.radius|Structuring Element Radius|None|None|1
-ParameterBoolean|-ip.suvbool|Multiple majority|False
-ParameterNumber|-ip.nodatalabel|Label for the NoData class|None|None|0
-ParameterNumber|-ip.undecidedlabel|Label for the Undecided class|None|None|0
diff --git a/python/plugins/processing/otb/description/ColorMapping-continuous.xml b/python/plugins/processing/otb/description/ColorMapping-continuous.xml
new file mode 100644
index 000000000000..acf6d0e55b28
--- /dev/null
+++ b/python/plugins/processing/otb/description/ColorMapping-continuous.xml
@@ -0,0 +1,98 @@
+
+ ColorMapping-continuous
+ otbcli_ColorMapping
+ ColorMapping (continuous)
+ Image Manipulation
+ Maps an input label image to 8-bits RGB using look-up tables.
+
+ ParameterRaster
+ in
+ Input Image
+ Input image filename
+ False
+
+
+ OutputRaster
+ out
+ Output Image
+ Output image filename
+
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterSelection
+ op
+ Operation
+ Selection of the operation to execute (default is : label to color).
+
+
+ labeltocolor
+
+
+ 0
+
+
+ ParameterSelection
+ method
+ Color mapping method
+ Selection of color mapping methods and their parameters.
+
+
+ continuous
+
+
+ 0
+
+
+ ParameterSelection
+ method.continuous.lut
+ Look-up tables
+ Available look-up tables.
+
+
+ red
+ green
+ blue
+ grey
+ hot
+ cool
+ spring
+ summer
+ autumn
+ winter
+ copper
+ jet
+ hsv
+ overunder
+ relief
+
+
+ 0
+
+
+ ParameterNumber
+ method.continuous.min
+ Mapping range lower value
+ Set the lower input value of the mapping range.
+
+
+ 0
+
+
+ ParameterNumber
+ method.continuous.max
+ Mapping range higher value
+ Set the higher input value of the mapping range.
+
+
+ 255
+
+
diff --git a/python/plugins/processing/otb/description/ColorMapping-custom.xml b/python/plugins/processing/otb/description/ColorMapping-custom.xml
new file mode 100644
index 000000000000..6257536c0886
--- /dev/null
+++ b/python/plugins/processing/otb/description/ColorMapping-custom.xml
@@ -0,0 +1,65 @@
+
+ ColorMapping-custom
+ otbcli_ColorMapping
+ ColorMapping (custom)
+ Image Manipulation
+ Maps an input label image to 8-bits RGB using look-up tables.
+
+ ParameterRaster
+ in
+ Input Image
+ Input image filename
+ False
+
+
+ OutputRaster
+ out
+ Output Image
+ Output image filename
+
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterSelection
+ op
+ Operation
+ Selection of the operation to execute (default is : label to color).
+
+
+ labeltocolor
+
+
+ 0
+
+
+ ParameterSelection
+ method
+ Color mapping method
+ Selection of color mapping methods and their parameters.
+
+
+ custom
+
+
+ 0
+
+
+ ParameterFile
+ method.custom.lut
+ Look-up table file
+ An ASCII file containing the look-up table
+with one color per line
+(for instance the line '1 255 0 0' means that all pixels with label 1 will be replaced by RGB color 255 0 0)
+Lines beginning with a # are ignored
+
+ False
+
+
diff --git a/python/plugins/processing/otb/description/ColorMapping-image.xml b/python/plugins/processing/otb/description/ColorMapping-image.xml
new file mode 100644
index 000000000000..60ef8c5ae9ad
--- /dev/null
+++ b/python/plugins/processing/otb/description/ColorMapping-image.xml
@@ -0,0 +1,88 @@
+
+ ColorMapping-image
+ otbcli_ColorMapping
+ ColorMapping (image)
+ Image Manipulation
+ Maps an input label image to 8-bits RGB using look-up tables.
+
+ ParameterRaster
+ in
+ Input Image
+ Input image filename
+ False
+
+
+ OutputRaster
+ out
+ Output Image
+ Output image filename
+
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterSelection
+ op
+ Operation
+ Selection of the operation to execute (default is : label to color).
+
+
+ labeltocolor
+
+
+ 0
+
+
+ ParameterSelection
+ method
+ Color mapping method
+ Selection of color mapping methods and their parameters.
+
+
+ image
+
+
+ 0
+
+
+ ParameterRaster
+ method.image.in
+ Support Image
+ Support image filename. For each label, the LUT is calculated from the mean pixel value in the support image, over the corresponding labeled areas. First of all, the support image is normalized with extrema rejection
+ False
+
+
+ ParameterNumber
+ method.image.nodatavalue
+ NoData value
+ NoData value for each channel of the support image, which will not be handled in the LUT estimation. If NOT checked, ALL the pixel values of the support image will be handled in the LUT estimation.
+
+
+ 0
+
+
+ ParameterNumber
+ method.image.low
+ lower quantile
+ lower quantile for image normalization
+
+
+ 2
+
+
+ ParameterNumber
+ method.image.up
+ upper quantile
+ upper quantile for image normalization
+
+
+ 2
+
+
diff --git a/python/plugins/processing/otb/description/ColorMapping-optimal.xml b/python/plugins/processing/otb/description/ColorMapping-optimal.xml
new file mode 100644
index 000000000000..d1a2922fcdcc
--- /dev/null
+++ b/python/plugins/processing/otb/description/ColorMapping-optimal.xml
@@ -0,0 +1,63 @@
+
+ ColorMapping-optimal
+ otbcli_ColorMapping
+ ColorMapping (optimal)
+ Image Manipulation
+ Maps an input label image to 8-bits RGB using look-up tables.
+
+ ParameterRaster
+ in
+ Input Image
+ Input image filename
+ False
+
+
+ OutputRaster
+ out
+ Output Image
+ Output image filename
+
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterSelection
+ op
+ Operation
+ Selection of the operation to execute (default is : label to color).
+
+
+ labeltocolor
+
+
+ 0
+
+
+ ParameterSelection
+ method
+ Color mapping method
+ Selection of color mapping methods and their parameters.
+
+
+ optimal
+
+
+ 0
+
+
+ ParameterNumber
+ method.optimal.background
+ Background label
+ Value of the background label
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/ColorMapping.txt b/python/plugins/processing/otb/description/ColorMapping.txt
deleted file mode 100644
index 60294b950f6a..000000000000
--- a/python/plugins/processing/otb/description/ColorMapping.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-ColorMapping
-otbcli_ColorMapping
-Color Mapping
-Image Manipulation
-ParameterRaster|-in|Input Image|False
-OutputRaster|-out|Output Image
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
-ParameterSelection|-op|Operation|labeltocolor;colortolabel|0
-ParameterNumber|-op.colortolabel.notfound|Not Found Label|None|None|404
-ParameterSelection|-method|Color mapping method|custom;continuous;optimal;image|0
-ParameterFile|-method.custom.lut|Look-up table file||
-ParameterSelection|-method.continuous.lut|Look-up tables|red;green;blue;grey;hot;cool;spring;summer;autumn;winter;copper;jet;hsv;overunder;relief|0
-ParameterNumber|-method.continuous.min|Mapping range lower value|None|None|0.0
-ParameterNumber|-method.continuous.max|Mapping range higher value|None|None|255.0
-ParameterNumber|-method.optimal.background|Background label|None|None|0
-ParameterRaster|-method.image.in|Support Image|False
-ParameterNumber|-method.image.low|lower quantile|None|None|2
-ParameterNumber|-method.image.up|upper quantile|None|None|2
diff --git a/python/plugins/processing/otb/description/CompareImages.txt b/python/plugins/processing/otb/description/CompareImages.txt
deleted file mode 100644
index 4bfde76f7cf0..000000000000
--- a/python/plugins/processing/otb/description/CompareImages.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-CompareImages
-otbcli_CompareImages
-Images comparaison
-Miscellaneous
-ParameterRaster|-ref.in|Reference image|False
-ParameterNumber|-ref.channel|Reference image channel|None|None|1
-ParameterRaster|-meas.in|Measured image|False
-ParameterNumber|-meas.channel|Measured image channel|None|None|1
-ParameterNumber|-roi.startx|Start X|None|None|0
-ParameterNumber|-roi.starty|Start Y|None|None|0
-ParameterNumber|-roi.sizex|Size X|None|None|0
-ParameterNumber|-roi.sizey|Size Y|None|None|0
-ParameterNumber|-mse|MSE|None|None|0.0
-ParameterNumber|-mae|MAE|None|None|0.0
-ParameterNumber|-psnr|PSNR|None|None|0.0
diff --git a/python/plugins/processing/otb/description/CompareImages.xml b/python/plugins/processing/otb/description/CompareImages.xml
new file mode 100644
index 000000000000..7e23f83650ab
--- /dev/null
+++ b/python/plugins/processing/otb/description/CompareImages.xml
@@ -0,0 +1,75 @@
+
+ CompareImages
+ otbcli_CompareImages
+ Images comparison
+ Miscellaneous
+ Estimator between 2 images.
+
+ ParameterRaster
+ ref.in
+ Reference image
+ Image used as reference in the comparison
+ False
+
+
+ ParameterNumber
+ ref.channel
+ Reference image channel
+ Used channel for the reference image
+
+
+ 1
+
+
+ ParameterRaster
+ meas.in
+ Measured image
+ Image used as measured in the comparison
+ False
+
+
+ ParameterNumber
+ meas.channel
+ Measured image channel
+ Used channel for the measured image
+
+
+ 1
+
+
+ ParameterNumber
+ roi.startx
+ Start X
+ ROI start x position.
+
+
+ 0
+
+
+ ParameterNumber
+ roi.starty
+ Start Y
+ ROI start y position.
+
+
+ 0
+
+
+ ParameterNumber
+ roi.sizex
+ Size X
+ Size along x in pixels.
+
+
+ 0
+
+
+ ParameterNumber
+ roi.sizey
+ Size Y
+ Size along y in pixels.
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/ComputeConfusionMatrix-raster.xml b/python/plugins/processing/otb/description/ComputeConfusionMatrix-raster.xml
new file mode 100644
index 000000000000..d42f3fd620e0
--- /dev/null
+++ b/python/plugins/processing/otb/description/ComputeConfusionMatrix-raster.xml
@@ -0,0 +1,58 @@
+
+ ComputeConfusionMatrix-raster
+ otbcli_ComputeConfusionMatrix
+ ComputeConfusionMatrix (raster)
+ Learning
+ Computes the confusion matrix of a classification
+
+ ParameterRaster
+ in
+ Input Image
+ The input classification image.
+ False
+
+
+ OutputFile
+ out
+ Matrix output
+ Filename to store the output matrix (csv format)
+
+
+
+ ParameterSelection
+ ref
+ Ground truth
+ Choice of ground truth format
+
+
+ raster
+
+
+ 0
+
+
+ ParameterRaster
+ ref.raster.in
+ Input reference image
+ Input image containing the ground truth labels
+ False
+
+
+ ParameterNumber
+ nodatalabel
+ Value for nodata pixels
+ Label for the NoData class. Such input pixels will be discarded from the ground truth and from the input classification map. By default, 'nodatalabel = 0'.
+
+
+ 0
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
diff --git a/python/plugins/processing/otb/description/ComputeConfusionMatrix-vector.xml b/python/plugins/processing/otb/description/ComputeConfusionMatrix-vector.xml
new file mode 100644
index 000000000000..42b830ccafcc
--- /dev/null
+++ b/python/plugins/processing/otb/description/ComputeConfusionMatrix-vector.xml
@@ -0,0 +1,68 @@
+
+ ComputeConfusionMatrix-vector
+ otbcli_ComputeConfusionMatrix
+ ComputeConfusionMatrix (vector)
+ Learning
+ Computes the confusion matrix of a classification
+
+ ParameterRaster
+ in
+ Input Image
+ The input classification image.
+ False
+
+
+ OutputFile
+ out
+ Matrix output
+ Filename to store the output matrix (csv format)
+
+
+
+ ParameterSelection
+ ref
+ Ground truth
+ Choice of ground truth format
+
+
+ vector
+
+
+ 0
+
+
+ ParameterFile
+ ref.vector.in
+ Input reference vector data
+ Input vector data of the ground truth
+
+ False
+
+
+ ParameterString
+ ref.vector.field
+ Field name
+ Field name containing the label values
+ Class
+
+ True
+
+
+ ParameterNumber
+ nodatalabel
+ Value for nodata pixels
+ Label for the NoData class. Such input pixels will be discarded from the ground truth and from the input classification map. By default, 'nodatalabel = 0'.
+
+
+ 0
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
diff --git a/python/plugins/processing/otb/description/ComputeConfusionMatrix.txt b/python/plugins/processing/otb/description/ComputeConfusionMatrix.txt
deleted file mode 100644
index 75a073ef6505..000000000000
--- a/python/plugins/processing/otb/description/ComputeConfusionMatrix.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-Compute Confusion Matrix
-otbcli_ComputeConfusionMatrix
-Compute Confusion Matrix
-Learning
-ParameterRaster|-in|Input Image|False
-OutputFile|-out|Matrix Output
-ParameterSelection|-ref|Ground Truth|raster;vector|1
-ParameterRaster|-ref.raster.in|Input Reference Image|False
-ParameterFile|-ref.vector.in|Input Vector Data|False
-ParameterString|-ref.vector.field|Field name|None|None|dn
-ParameterNumber|-labels|Number of labels|None|None|2
-ParameterNumber|-nodata|Value for nodata pixels|None|None|0
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
-
diff --git a/python/plugins/processing/otb/description/ComputeImagesStatistics.txt b/python/plugins/processing/otb/description/ComputeImagesStatistics.txt
deleted file mode 100644
index c5991623ef6e..000000000000
--- a/python/plugins/processing/otb/description/ComputeImagesStatistics.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-ComputeImagesStatistics
-otbcli_ComputeImagesStatistics
-Compute Images second order statistics
-Learning
-ParameterMultipleInput|-il|Input images|3|False
-OutputFile|-out|Output XML file
diff --git a/python/plugins/processing/otb/description/ComputeImagesStatistics.xml b/python/plugins/processing/otb/description/ComputeImagesStatistics.xml
new file mode 100644
index 000000000000..43c742d6796a
--- /dev/null
+++ b/python/plugins/processing/otb/description/ComputeImagesStatistics.xml
@@ -0,0 +1,31 @@
+
+ ComputeImagesStatistics
+ otbcli_ComputeImagesStatistics
+ Compute Images second order statistics
+ Learning
+ Computes global mean and standard deviation for each band from a set of images and optionally saves the results in an XML file.
+
+ ParameterMultipleInput
+ il
+ Input images
+ List of input images filenames.
+
+ False
+
+
+ ParameterNumber
+ bv
+ Background Value
+ Background value to ignore in statistics computation.
+
+
+ 0.0
+
+
+ OutputFile
+ out
+ Output XML file
+ XML filename where the statistics are saved for future reuse.
+
+
+
diff --git a/python/plugins/processing/otb/description/ComputeModulusAndPhase-OneEntry.xml b/python/plugins/processing/otb/description/ComputeModulusAndPhase-OneEntry.xml
new file mode 100644
index 000000000000..bdc2f5229eff
--- /dev/null
+++ b/python/plugins/processing/otb/description/ComputeModulusAndPhase-OneEntry.xml
@@ -0,0 +1,49 @@
+
+ ComputeModulusAndPhase-one-OneEntry
+ otbcli_ComputeModulusAndPhase
+ ComputeModulusAndPhase-one (OneEntry)
+ Miscellaneous
+ This application computes the modulus and the phase of a complex SAR data.
+
+ ParameterSelection
+ nbinput
+ Number Of inputs
+ Choice about the number of input files used to store the real and imaginary part of the SAR image
+
+
+ one
+
+
+ 0
+
+
+ ParameterRaster
+ nbinput.one.in
+ Input image
+ Image file with SAR data.
+ False
+
+
+ OutputRaster
+ mod
+ Modulus
+ Modulus of the input: sqrt(real*real + imag*imag).
+
+
+
+ OutputRaster
+ pha
+ Phase
+ Phase of the input: atan2(imag, real).
+
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
diff --git a/python/plugins/processing/otb/description/ComputeModulusAndPhase-TwoEntries.xml b/python/plugins/processing/otb/description/ComputeModulusAndPhase-TwoEntries.xml
new file mode 100644
index 000000000000..336924e5605d
--- /dev/null
+++ b/python/plugins/processing/otb/description/ComputeModulusAndPhase-TwoEntries.xml
@@ -0,0 +1,56 @@
+
+ ComputeModulusAndPhase-two-TwoEntries
+ otbcli_ComputeModulusAndPhase
+ ComputeModulusAndPhase-two (TwoEntries)
+ Miscellaneous
+ This application computes the modulus and the phase of a complex SAR data.
+
+ ParameterSelection
+ nbinput
+ Number Of inputs
+ Choice about the number of input files used to store the real and imaginary part of the SAR image
+
+
+ two
+
+
+ 0
+
+
+ ParameterRaster
+ nbinput.two.re
+ Real part input
+ Image file with real part of the SAR data.
+ False
+
+
+ ParameterRaster
+ nbinput.two.im
+ Imaginary part input
+ Image file with imaginary part of the SAR data.
+ False
+
+
+ OutputRaster
+ mod
+ Modulus
+ Modulus of the input: sqrt(real*real + imag*imag).
+
+
+
+ OutputRaster
+ pha
+ Phase
+ Phase of the input: atan2(imag, real).
+
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
diff --git a/python/plugins/processing/otb/description/ComputePolylineFeatureFromImage.txt b/python/plugins/processing/otb/description/ComputePolylineFeatureFromImage.txt
deleted file mode 100644
index f0fafbaa2113..000000000000
--- a/python/plugins/processing/otb/description/ComputePolylineFeatureFromImage.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-ComputePolylineFeatureFromImage
-otbcli_ComputePolylineFeatureFromImage
-Compute Polyline Feature From Image
-Feature Extraction
-ParameterRaster|-in|Input Image|False
-ParameterVector|-vd|Vector Data|-1|False
-ParameterSelection|-elev|Elevation management|dem;average|1
-ParameterFile|-elev.dem.path|DEM directory|
-ParameterFile|-elev.dem.geoid|Geoid File||
-ParameterNumber|-elev.average.value|Average Elevation|None|None|0.0
-ParameterString|-expr|Feature expression|
-ParameterString|-field|Feature name|
-OutputVector|-out|Output Vector Data
diff --git a/python/plugins/processing/otb/description/ConcatenateImages.txt b/python/plugins/processing/otb/description/ConcatenateImages.txt
deleted file mode 100644
index 1f4173ed8059..000000000000
--- a/python/plugins/processing/otb/description/ConcatenateImages.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-ConcatenateImages
-otbcli_ConcatenateImages
-Images Concatenation
-Image Manipulation
-ParameterMultipleInput|-il|Input images list|3|False
-OutputRaster|-out|Output Image
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
diff --git a/python/plugins/processing/otb/description/ConcatenateImages.xml b/python/plugins/processing/otb/description/ConcatenateImages.xml
new file mode 100644
index 000000000000..eea11d897702
--- /dev/null
+++ b/python/plugins/processing/otb/description/ConcatenateImages.xml
@@ -0,0 +1,31 @@
+
+ ConcatenateImages
+ otbcli_ConcatenateImages
+ Images Concatenation
+ Image Manipulation
+ Concatenate a list of images of the same size into a single multi-channel one.
+
+ ParameterMultipleInput
+ il
+ Input images list
+ The list of images to concatenate
+
+ False
+
+
+ OutputRaster
+ out
+ Output Image
+ The concatenated output image
+
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
diff --git a/python/plugins/processing/otb/description/ConcatenateVectorData.txt b/python/plugins/processing/otb/description/ConcatenateVectorData.txt
deleted file mode 100644
index a7c76ec45d34..000000000000
--- a/python/plugins/processing/otb/description/ConcatenateVectorData.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-ConcatenateVectorData
-otbcli_ConcatenateVectorData
-Concatenate
-Vector Data Manipulation
-ParameterMultipleInput|-vd|Input VectorDatas to concatenate|-1|False
-OutputVector|-out|Concatenated VectorData
diff --git a/python/plugins/processing/otb/description/ConcatenateVectorData.xml b/python/plugins/processing/otb/description/ConcatenateVectorData.xml
new file mode 100644
index 000000000000..9b95a36fee95
--- /dev/null
+++ b/python/plugins/processing/otb/description/ConcatenateVectorData.xml
@@ -0,0 +1,22 @@
+
+ ConcatenateVectorData
+ otbcli_ConcatenateVectorData
+ Concatenate
+ Vector Data Manipulation
+ Concatenate VectorDatas
+
+ ParameterMultipleInput
+ vd
+ Input VectorDatas to concatenate
+ VectorData files to be concatenated in an unique VectorData
+
+ False
+
+
+ OutputVector
+ out
+ Concatenated VectorData
+ Output concatenated VectorData
+
+
+
diff --git a/python/plugins/processing/otb/description/ConnectedComponentSegmentation.txt b/python/plugins/processing/otb/description/ConnectedComponentSegmentation.txt
deleted file mode 100644
index 26d18616f378..000000000000
--- a/python/plugins/processing/otb/description/ConnectedComponentSegmentation.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-ConnectedComponentSegmentation
-otbcli_ConnectedComponentSegmentation
-Connected Component Segmentation
-Segmentation
-ParameterRaster|-in|Input Image|False
-OutputVector|-out|Output Shape
-ParameterString|-mask|Mask expression|
-ParameterString|-expr|Connected Component Expression|
-ParameterNumber|-minsize|Minimum Object Size|None|None|2
-ParameterString|-obia|OBIA Expression|
-ParameterFile|-elev.dem|DEM directory|True
-ParameterFile|-elev.geoid|Geoid File||True
-ParameterNumber|-elev.default|Average Elevation|None|None|0.0
diff --git a/python/plugins/processing/otb/description/ConnectedComponentSegmentation.xml b/python/plugins/processing/otb/description/ConnectedComponentSegmentation.xml
new file mode 100644
index 000000000000..8fad26e1b547
--- /dev/null
+++ b/python/plugins/processing/otb/description/ConnectedComponentSegmentation.xml
@@ -0,0 +1,66 @@
+
+ ConnectedComponentSegmentation
+ otbcli_ConnectedComponentSegmentation
+ Connected Component Segmentation
+ Segmentation
+ Connected component segmentation and object based image filtering of the input image according to user-defined criterions.
+
+ ParameterRaster
+ in
+ Input Image
+ The image to segment.
+ False
+
+
+ OutputVector
+ out
+ Output Shape
+ The segmentation shape.
+
+
+
+ ParameterString
+ mask
+ Mask expression
+ Mask mathematical expression (only if support image is given)
+
+
+ True
+
+
+ ParameterString
+ expr
+ Connected Component Expression
+ Formula used for connected component segmentation
+
+
+ False
+
+
+ ParameterNumber
+ minsize
+ Minimum Object Size
+ Min object size (area in pixel)
+
+
+ 2
+
+
+ ParameterString
+ obia
+ OBIA Expression
+ OBIA mathematical expression
+
+
+ True
+
+
+ ParameterNumber
+ elev.default
+ Default elevation
+ This parameter allows to set the default height above ellipsoid when there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles, and no geoid file has been set. This is also used by some application as an average elevation value.
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/Convert.txt b/python/plugins/processing/otb/description/Convert.txt
deleted file mode 100644
index b7208bc008b2..000000000000
--- a/python/plugins/processing/otb/description/Convert.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-Convert
-otbcli_Convert
-Image Conversion
-Image Manipulation
-ParameterRaster|-in|Input image|False
-ParameterSelection|-type|Rescale type|none;linear;log2|0
-OutputRaster|-out|Output Image
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
diff --git a/python/plugins/processing/otb/description/ConvertCartoToGeoPoint.txt b/python/plugins/processing/otb/description/ConvertCartoToGeoPoint.txt
deleted file mode 100644
index db57628e80ba..000000000000
--- a/python/plugins/processing/otb/description/ConvertCartoToGeoPoint.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-ConvertCartoToGeoPoint
-otbcli_ConvertCartoToGeoPoint
-Cartographic to geographic coordinates conversion
-Geometry
-ParameterNumber|-carto.x|X cartographic coordinates|None|None|0.0
-ParameterNumber|-carto.y|Y cartographic coordinates|None|None|0.0
-ParameterSelection|-mapproj|Output Cartographic Map Projection|utm;lambert2;lambert93;wgs;epsg|4
-ParameterNumber|-mapproj.utm.zone|Zone number|None|None|31
-ParameterBoolean|-mapproj.utm.northhem|Northern Hemisphere|
-ParameterNumber|-mapproj.epsg.code|EPSG Code|None|None|32631
-ParameterNumber|-long|Output long|None|None|0.0
-ParameterNumber|-lat|Output lat|None|None|0.0
diff --git a/python/plugins/processing/otb/description/ConvertSensorToGeoPoint.txt b/python/plugins/processing/otb/description/ConvertSensorToGeoPoint.txt
deleted file mode 100644
index 460155c241fe..000000000000
--- a/python/plugins/processing/otb/description/ConvertSensorToGeoPoint.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-ConvertSensorToGeoPoint
-otbcli_ConvertSensorToGeoPoint
-Convert Sensor Point To Geographic Point
-Geometry
-ParameterRaster|-in|Sensor image|False
-ParameterNumber|-input.idx|X value of desired point|None|None|0.0
-ParameterNumber|-input.idy|Y value of desired point|None|None|0.0
-ParameterNumber|-output.idx|Output Point Longitude|None|None|0.0
-ParameterNumber|-output.idy|Output Point Latitude|None|None|0.0
-ParameterString|-output.town|Main town near the coordinates computed|
-ParameterString|-output.country|Country of the image|
diff --git a/python/plugins/processing/otb/description/DSFuzzyModelEstimation.txt b/python/plugins/processing/otb/description/DSFuzzyModelEstimation.txt
deleted file mode 100644
index 9ba7a4dd04a2..000000000000
--- a/python/plugins/processing/otb/description/DSFuzzyModelEstimation.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-DSFuzzyModelEstimation
-otbcli_DSFuzzyModelEstimation
-Fuzzy Model estimation
-Feature Extraction
-ParameterVector|-psin|Input Positive Vector Data|-1|False
-ParameterVector|-nsin|Input Negative Vector Data|-1|False
-ParameterString|-cri|Criterion|((Belief + Plausibility)/2.)
-ParameterNumber|-wgt|Weighting|None|None|0.5
-ParameterFile|-initmod|initialization model||
-ParameterNumber|-maxnbit|Maximum number of iterations|None|None|200
-ParameterBoolean|-optobs|Optimizer Observer|
-OutputFile|-out|Output filename
diff --git a/python/plugins/processing/otb/description/DimensionalityReduction-ica.xml b/python/plugins/processing/otb/description/DimensionalityReduction-ica.xml
new file mode 100644
index 000000000000..4123cfec5f38
--- /dev/null
+++ b/python/plugins/processing/otb/description/DimensionalityReduction-ica.xml
@@ -0,0 +1,81 @@
+
+ DimensionalityReduction-ica
+ otbcli_DimensionalityReduction
+ DimensionalityReduction (ica)
+ Image Filtering
+ Perform Dimension reduction of the input image.
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to apply dimensionality reduction.
+ False
+
+
+ OutputRaster
+ out
+ Output Image
+ output image. Components are ordered by decreasing eigenvalues.
+
+
+
+ OutputRaster
+ outinv
+ Inverse Output Image
+ reconstruct output image.
+
+
+
+ ParameterSelection
+ method
+ Algorithm
+ Selection of the reduction dimension method.
+
+
+ ica
+
+
+ 0
+
+
+ ParameterNumber
+ method.ica.iter
+ number of iterations
+
+
+
+ 20
+
+
+ ParameterNumber
+ method.ica.mu
+ Give the increment weight of W in [0, 1]
+
+
+
+ 1
+
+
+ ParameterNumber
+ nbcomp
+ Number of Components.
+ Number of relevant components kept. By default all components are kept.
+
+
+ 0
+
+
+ ParameterBoolean
+ normalize
+ Normalize.
+ center AND reduce data before Dimensionality reduction.
+ True
+
+
+ OutputFile
+ outmatrix
+ Transformation matrix output
+ Filename to store the transformation matrix (csv format)
+
+
+
diff --git a/python/plugins/processing/otb/description/DimensionalityReduction-maf.xml b/python/plugins/processing/otb/description/DimensionalityReduction-maf.xml
new file mode 100644
index 000000000000..99936940c9e4
--- /dev/null
+++ b/python/plugins/processing/otb/description/DimensionalityReduction-maf.xml
@@ -0,0 +1,56 @@
+
+ DimensionalityReduction-maf
+ otbcli_DimensionalityReduction
+ DimensionalityReduction (maf)
+ Image Filtering
+ Perform Dimension reduction of the input image.
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to apply dimensionality reduction.
+ False
+
+
+ OutputRaster
+ out
+ Output Image
+ output image. Components are ordered by decreasing eigenvalues.
+
+
+
+ ParameterSelection
+ method
+ Algorithm
+ Selection of the reduction dimension method.
+
+
+ maf
+
+
+ 0
+
+
+ ParameterNumber
+ nbcomp
+ Number of Components.
+ Number of relevant components kept. By default all components are kept.
+
+
+ 0
+
+
+ ParameterBoolean
+ normalize
+ Normalize.
+ center AND reduce data before Dimensionality reduction.
+ True
+
+
+ OutputFile
+ outmatrix
+ Transformation matrix output
+ Filename to store the transformation matrix (csv format)
+
+
+
diff --git a/python/plugins/processing/otb/description/DimensionalityReduction-napca.xml b/python/plugins/processing/otb/description/DimensionalityReduction-napca.xml
new file mode 100644
index 000000000000..752c0fe15416
--- /dev/null
+++ b/python/plugins/processing/otb/description/DimensionalityReduction-napca.xml
@@ -0,0 +1,81 @@
+
+ DimensionalityReduction-napca
+ otbcli_DimensionalityReduction
+ DimensionalityReduction (napca)
+ Image Filtering
+ Perform Dimension reduction of the input image.
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to apply dimensionality reduction.
+ False
+
+
+ OutputRaster
+ out
+ Output Image
+ output image. Components are ordered by decreasing eigenvalues.
+
+
+
+ OutputRaster
+ outinv
+ Inverse Output Image
+ reconstruct output image.
+
+
+
+ ParameterSelection
+ method
+ Algorithm
+ Selection of the reduction dimension method.
+
+
+ napca
+
+
+ 0
+
+
+ ParameterNumber
+ method.napca.radiusx
+ Set the x radius of the sliding window.
+
+
+
+ 1
+
+
+ ParameterNumber
+ method.napca.radiusy
+ Set the y radius of the sliding window.
+
+
+
+ 1
+
+
+ ParameterNumber
+ nbcomp
+ Number of Components.
+ Number of relevant components kept. By default all components are kept.
+
+
+ 0
+
+
+ ParameterBoolean
+ normalize
+ Normalize.
+ center AND reduce data before Dimensionality reduction.
+ True
+
+
+ OutputFile
+ outmatrix
+ Transformation matrix output
+ Filename to store the transformation matrix (csv format)
+
+
+
diff --git a/python/plugins/processing/otb/description/DimensionalityReduction-pca.xml b/python/plugins/processing/otb/description/DimensionalityReduction-pca.xml
new file mode 100644
index 000000000000..2a6e83af65e4
--- /dev/null
+++ b/python/plugins/processing/otb/description/DimensionalityReduction-pca.xml
@@ -0,0 +1,63 @@
+
+ DimensionalityReduction-pca
+ otbcli_DimensionalityReduction
+ DimensionalityReduction (pca)
+ Image Filtering
+ Perform Dimension reduction of the input image.
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to apply dimensionality reduction.
+ False
+
+
+ OutputRaster
+ out
+ Output Image
+ output image. Components are ordered by decreasing eigenvalues.
+
+
+
+ OutputRaster
+ outinv
+ Inverse Output Image
+ reconstruct output image.
+
+
+
+ ParameterSelection
+ method
+ Algorithm
+ Selection of the reduction dimension method.
+
+
+ pca
+
+
+ 0
+
+
+ ParameterNumber
+ nbcomp
+ Number of Components.
+ Number of relevant components kept. By default all components are kept.
+
+
+ 0
+
+
+ ParameterBoolean
+ normalize
+ Normalize.
+ center AND reduce data before Dimensionality reduction.
+ True
+
+
+ OutputFile
+ outmatrix
+ Transformation matrix output
+ Filename to store the transformation matrix (csv format)
+
+
+
diff --git a/python/plugins/processing/otb/description/DimensionalityReduction.txt b/python/plugins/processing/otb/description/DimensionalityReduction.txt
deleted file mode 100644
index f629a4590776..000000000000
--- a/python/plugins/processing/otb/description/DimensionalityReduction.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-DimensionalityReduction
-otbcli_DimensionalityReduction
-Dimensionality reduction application
-Image Filtering
-ParameterRaster|-in|Input Image|False
-OutputRaster|-out|Output Image
-ParameterNumber|-rescale.outmin|Output min value|None|None|0.0
-ParameterNumber|-rescale.outmax|Output max value|None|None|255.0
-OutputRaster|-outinv| Inverse Output Image
-ParameterSelection|-method|Algorithm|pca;napca;maf;ica|0
-ParameterNumber|-method.napca.radiusx|Set the x radius of the sliding window.|None|None|1
-ParameterNumber|-method.napca.radiusy|Set the y radius of the sliding window.|None|None|1
-ParameterNumber|-method.ica.iter|number of iterations |None|None|20
-ParameterNumber|-method.ica.mu|Give the increment weight of W in [0, 1]|None|None|1.0
-ParameterNumber|-nbcomp|Number of Components.|None|None|0
-ParameterBoolean|-normalize|Normalize.|
diff --git a/python/plugins/processing/otb/description/DisparityMapToElevationMap.txt b/python/plugins/processing/otb/description/DisparityMapToElevationMap.txt
deleted file mode 100644
index d2de6103203d..000000000000
--- a/python/plugins/processing/otb/description/DisparityMapToElevationMap.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-DisparityMapToElevationMap
-otbcli_DisparityMapToElevationMap
-Disparity map to elevation map
-Stereo
-ParameterRaster|-io.in|Input disparity map|False
-ParameterRaster|-io.left|Left sensor image|False
-ParameterRaster|-io.right|Right sensor image|False
-ParameterRaster|-io.lgrid|Left Grid|False
-ParameterRaster|-io.rgrid|Right Grid|False
-OutputRaster|-io.out|Output elevation map
-ParameterRaster|-io.mask|Disparity mask|True
-ParameterNumber|-step|DEM step|None|None|5.0
-ParameterNumber|-hmin|Minimum elevation expected|None|None|0.0
-ParameterNumber|-hmax|Maximum elevation expected|None|None|100.0
-ParameterSelection|-elev|Elevation management|dem;average|1
-ParameterFile|-elev.dem.path|DEM directory|
-ParameterFile|-elev.dem.geoid|Geoid File||
-ParameterNumber|-elev.average.value|Average Elevation|None|None|0.0
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
diff --git a/python/plugins/processing/otb/description/EdgeExtraction-gradient.xml b/python/plugins/processing/otb/description/EdgeExtraction-gradient.xml
new file mode 100644
index 000000000000..4c367d9593e2
--- /dev/null
+++ b/python/plugins/processing/otb/description/EdgeExtraction-gradient.xml
@@ -0,0 +1,51 @@
+
+ EdgeExtraction-gradient
+ otbcli_EdgeExtraction
+ EdgeExtraction (gradient)
+ Feature Extraction
+ Computes edge features on every pixel of the input image selected channel
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to compute the features on.
+ False
+
+
+ ParameterNumber
+ channel
+ Selected Channel
+ The selected channel index
+
+
+ 1
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterSelection
+ filter
+ Edge feature
+ Choice of edge feature
+
+
+ gradient
+
+
+ 0
+
+
+ OutputRaster
+ out
+ Feature Output Image
+ Output image containing the edge features.
+
+
+
diff --git a/python/plugins/processing/otb/description/EdgeExtraction-sobel.xml b/python/plugins/processing/otb/description/EdgeExtraction-sobel.xml
new file mode 100644
index 000000000000..80d9e1158b86
--- /dev/null
+++ b/python/plugins/processing/otb/description/EdgeExtraction-sobel.xml
@@ -0,0 +1,51 @@
+
+ EdgeExtraction-sobel
+ otbcli_EdgeExtraction
+ EdgeExtraction (sobel)
+ Feature Extraction
+ Computes edge features on every pixel of the input image selected channel
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to compute the features on.
+ False
+
+
+ ParameterNumber
+ channel
+ Selected Channel
+ The selected channel index
+
+
+ 1
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterSelection
+ filter
+ Edge feature
+ Choice of edge feature
+
+
+ sobel
+
+
+ 0
+
+
+ OutputRaster
+ out
+ Feature Output Image
+ Output image containing the edge features.
+
+
+
diff --git a/python/plugins/processing/otb/description/EdgeExtraction-touzi.xml b/python/plugins/processing/otb/description/EdgeExtraction-touzi.xml
new file mode 100644
index 000000000000..4238d64bbf3a
--- /dev/null
+++ b/python/plugins/processing/otb/description/EdgeExtraction-touzi.xml
@@ -0,0 +1,60 @@
+
+ EdgeExtraction-touzi
+ otbcli_EdgeExtraction
+ EdgeExtraction (touzi)
+ Feature Extraction
+ Computes edge features on every pixel of the input image selected channel
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to compute the features on.
+ False
+
+
+ ParameterNumber
+ channel
+ Selected Channel
+ The selected channel index
+
+
+ 1
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterSelection
+ filter
+ Edge feature
+ Choice of edge feature
+
+
+ touzi
+
+
+ 0
+
+
+ ParameterNumber
+ filter.touzi.xradius
+ The X Radius
+ The X Radius
+
+
+ 1
+
+
+ OutputRaster
+ out
+ Feature Output Image
+ Output image containing the edge features.
+
+
+
diff --git a/python/plugins/processing/otb/description/EdgeExtraction.txt b/python/plugins/processing/otb/description/EdgeExtraction.txt
deleted file mode 100644
index 73d54ee7819b..000000000000
--- a/python/plugins/processing/otb/description/EdgeExtraction.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Edge Extraction
-otbcli_EdgeExtraction
-Edge Extraction
-Feature Extraction
-ParameterRaster|-in|Input Image|False
-OutputRaster|-out|Output Image
-ParameterNumber|-channel|Selected Channel|None|None|1
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
-ParameterSelection|-filter|Edge feature|gradient;sobel;touzi|0
-ParameterNumber|-filter.touzi.xradius|The X Radius|None|None|1
-ParameterNumber|-filter.touzi.yradius|The Y Radius|None|None|1
-
diff --git a/python/plugins/processing/otb/description/EdisonMeanShiftSegmentation.txt b/python/plugins/processing/otb/description/EdisonMeanShiftSegmentation.txt
deleted file mode 100644
index fc6d1696a095..000000000000
--- a/python/plugins/processing/otb/description/EdisonMeanShiftSegmentation.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-EdisonMeanShiftSegmentation
-otbcli_Segmentation
-Edison Mean Shift segmentation (labeled raster output)
-Segmentation
-ParameterRaster|-filter edison -in|Input Image|False
-ParameterNumber|-filter.edison.spatialr|Spatial radius|None|None|5
-ParameterNumber|-filter.edison.ranger|Range radius|None|None|15.0
-ParameterNumber|-filter.edison.minsize|Min region size|0|None|100
-ParameterNumber|-filter.edison.scale|Scale Factor|0|None|1.0
-OutputRaster|-mode raster -mode.raster.out|Output labeled image
-
diff --git a/python/plugins/processing/otb/description/EdisonMeanShiftSegmentation_vector.txt b/python/plugins/processing/otb/description/EdisonMeanShiftSegmentation_vector.txt
deleted file mode 100644
index c27d490c60f5..000000000000
--- a/python/plugins/processing/otb/description/EdisonMeanShiftSegmentation_vector.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-EdisonMeanShiftSegmentationVector
-otbcli_Segmentation
-Edison Mean Shift segmentation (large-scale, vector output)
-Segmentation
-ParameterRaster|-filter edison -in|Input Image|False
-ParameterNumber|-filter.edison.spatialr|Spatial radius|None|None|5
-ParameterNumber|-filter.edison.ranger|Range radius|None|None|15.0
-ParameterNumber|-filter.edison.minsize|Min region size|0|None|100
-ParameterNumber|-filter.edison.scale|Scale Factor|0|None|1.0
-OutputVector|-mode vector -mode.vector.out|Output vector file
-ParameterVector|-mode.vector.inmask|Mask Image|-1|True
-ParameterBoolean|-mode.vector.neighbor|8-neighbor connectivity|False
-ParameterBoolean|-mode.vector.stitch|Stitch polygons|True
-ParameterNumber|-mode.vector.minsize|Minimum object size|1|None|1
-ParameterNumber|-mode.vector.simplify|Simplify polygons|None|None|0.0
-ParameterString|-mode.vector.layername|Layer name |layer
-ParemeterString|-mode.vector.fieldname|Geometry index field name|DN
-ParameterNumber|-mode.vector.tilesize|Tile size|0|None|1024
-ParameterNumber|-mode.vector.startlabel|Starting geometry index|1|None|1
-ParameterSelection|-mode.vector.outmode|Writing mode (update file/overwrite file/overwrite layer/update layer)|ulco;ovw;ulovw;ulu|0
diff --git a/python/plugins/processing/otb/description/ExtractROI-fit.xml b/python/plugins/processing/otb/description/ExtractROI-fit.xml
new file mode 100644
index 000000000000..968cbd8f2775
--- /dev/null
+++ b/python/plugins/processing/otb/description/ExtractROI-fit.xml
@@ -0,0 +1,58 @@
+
+ ExtractROI-fit
+ otbcli_ExtractROI
+ ExtractROI (fit)
+ Image Manipulation
+ Extract a ROI defined by the user.
+
+ ParameterRaster
+ in
+ Input Image
+ Input image.
+ False
+
+
+ OutputRaster
+ out
+ Output Image
+ Output image.
+
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterSelection
+ mode
+ Extraction mode
+
+
+
+ fit
+
+
+ 0
+
+
+ ParameterRaster
+ mode.fit.ref
+ Reference image
+ Reference image to define the ROI
+ False
+
+
+ ParameterNumber
+ mode.fit.elev.default
+ Default elevation
+ This parameter allows to set the default height above ellipsoid when there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles, and no geoid file has been set. This is also used by some application as an average elevation value.
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/ExtractROI-standard.xml b/python/plugins/processing/otb/description/ExtractROI-standard.xml
new file mode 100644
index 000000000000..4eb65f3044fc
--- /dev/null
+++ b/python/plugins/processing/otb/description/ExtractROI-standard.xml
@@ -0,0 +1,78 @@
+
+ ExtractROI-standard
+ otbcli_ExtractROI
+ ExtractROI (standard)
+ Image Manipulation
+ Extract a ROI defined by the user.
+
+ ParameterRaster
+ in
+ Input Image
+ Input image.
+ False
+
+
+ OutputRaster
+ out
+ Output Image
+ Output image.
+
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterSelection
+ mode
+ Extraction mode
+
+
+
+ standard
+
+
+ 0
+
+
+ ParameterNumber
+ startx
+ Start X
+ ROI start x position.
+
+
+ 0
+
+
+ ParameterNumber
+ starty
+ Start Y
+ ROI start y position.
+
+
+ 0
+
+
+ ParameterNumber
+ sizex
+ Size X
+ size along x in pixels.
+
+
+ 0
+
+
+ ParameterNumber
+ sizey
+ Size Y
+ size along y in pixels.
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/ExtractROI.txt b/python/plugins/processing/otb/description/ExtractROI.txt
deleted file mode 100644
index 411e7b4bdba7..000000000000
--- a/python/plugins/processing/otb/description/ExtractROI.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-ExtractROI
-otbcli_ExtractROI
-Extract ROI
-Image Manipulation
-ParameterRaster|-in|Input Image|False
-OutputRaster|-out|Output Image
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
-ParameterNumber|-startx|Start X|None|None|0
-ParameterNumber|-starty|Start Y|None|None|0
-ParameterNumber|-sizex|Size X|None|None|0
-ParameterNumber|-sizey|Size Y|None|None|0
diff --git a/python/plugins/processing/otb/description/FineRegistration.txt b/python/plugins/processing/otb/description/FineRegistration.txt
deleted file mode 100644
index 83554534dbf2..000000000000
--- a/python/plugins/processing/otb/description/FineRegistration.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-FineRegistration
-otbcli_FineRegistration
-Fine Registration
-Stereo
-ParameterRaster|-ref|Reference Image|False
-ParameterRaster|-sec|Secondary Image|False
-OutputRaster|-out|Output Image
-ParameterNumber|-erx|Exploration Radius X|None|None|0
-ParameterNumber|-ery|Exploration Radius Y|None|None|0
-ParameterNumber|-mrx|Metric Radius X|None|None|0
-ParameterNumber|-mry|Metric Radius Y|None|None|0
-ParameterRaster|-w|Image To Warp|True
-OutputRaster|-wo|Output Warped Image
-ParameterNumber|-cox|Coarse Offset X|None|None|0.0
-ParameterNumber|-coy|Coarse Offset Y|None|None|0.0
-ParameterNumber|-ssrx|Sub-Sampling Rate X|None|None|1.0
-ParameterNumber|-ssry|Sub-Sampling Rate Y|None|None|1.0
-ParameterNumber|-rgsx|Reference Gaussian Smoothing X|None|None|0.0
-ParameterNumber|-rgsy|Reference Gaussian Smoothing Y|None|None|0.0
-ParameterNumber|-sgsx|Secondary Gaussian Smoothing X|None|None|0.0
-ParameterNumber|-sgsy|Secondary Gaussian Smoothing Y|None|None|0.0
-ParameterString|-m|Metric|
-ParameterNumber|-spa|SubPixelAccuracy|None|None|0.00999999977648
-ParameterNumber|-vmlt|Validity Mask Lower Threshold|None|None|0.0
-ParameterNumber|-vmut|Validity Mask Upper Threshold|None|None|0.0
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
diff --git a/python/plugins/processing/otb/description/FusionOfClassifications-dempstershafer.xml b/python/plugins/processing/otb/description/FusionOfClassifications-dempstershafer.xml
new file mode 100644
index 000000000000..47f9d023cf8a
--- /dev/null
+++ b/python/plugins/processing/otb/description/FusionOfClassifications-dempstershafer.xml
@@ -0,0 +1,74 @@
+
+ FusionOfClassifications-dempstershafer
+ otbcli_FusionOfClassifications
+ FusionOfClassifications (dempstershafer)
+ Learning
+ Fuses several classifications maps of the same image on the basis of class labels.
+
+ ParameterMultipleInput
+ il
+ Input classifications
+ List of input classification maps to fuse. Labels in each classification image must represent the same class.
+
+ False
+
+
+ ParameterSelection
+ method
+ Fusion method
+ Selection of the fusion method and its parameters.
+
+
+ dempstershafer
+
+
+ 0
+
+
+ ParameterMultipleExternalInput
+ method.dempstershafer.cmfl
+ Confusion Matrices
+ A list of confusion matrix files (*.CSV format) to define the masses of belief and the class labels. Each file should be formatted the following way: the first line, beginning with a '#' symbol, should be a list of the class labels present in the corresponding input classification image, organized in the same order as the confusion matrix rows/columns.
+ False
+
+
+ ParameterSelection
+ method.dempstershafer.mob
+ Mass of belief measurement
+ Type of confusion matrix measurement used to compute the masses of belief of each classifier.
+
+
+ precision
+ recall
+ accuracy
+ kappa
+
+
+ 0
+
+
+ ParameterNumber
+ nodatalabel
+ Label for the NoData class
+ Label for the NoData class. Such input pixels keep their NoData label in the output image and are not handled in the fusion process. By default, 'nodatalabel = 0'.
+
+
+ 0
+
+
+ ParameterNumber
+ undecidedlabel
+ Label for the Undecided class
+ Label for the Undecided class. Pixels with more than 1 fused class are marked as Undecided. Please note that the Undecided value must be different from existing labels in the input classifications. By default, 'undecidedlabel = 0'.
+
+
+ 0
+
+
+ OutputRaster
+ out
+ The output classification image
+ The output classification image resulting from the fusion of the input classification images.
+
+
+
diff --git a/python/plugins/processing/otb/description/FusionOfClassifications-majorityvoting.xml b/python/plugins/processing/otb/description/FusionOfClassifications-majorityvoting.xml
new file mode 100644
index 000000000000..22a743bdd711
--- /dev/null
+++ b/python/plugins/processing/otb/description/FusionOfClassifications-majorityvoting.xml
@@ -0,0 +1,52 @@
+
+ FusionOfClassifications-majorityvoting
+ otbcli_FusionOfClassifications
+ FusionOfClassifications (majorityvoting)
+ Learning
+ Fuses several classifications maps of the same image on the basis of class labels.
+
+ ParameterMultipleInput
+ il
+ Input classifications
+ List of input classification maps to fuse. Labels in each classification image must represent the same class.
+
+ False
+
+
+ ParameterSelection
+ method
+ Fusion method
+ Selection of the fusion method and its parameters.
+
+
+ majorityvoting
+
+
+ 0
+
+
+ ParameterNumber
+ nodatalabel
+ Label for the NoData class
+ Label for the NoData class. Such input pixels keep their NoData label in the output image and are not handled in the fusion process. By default, 'nodatalabel = 0'.
+
+
+ 0
+
+
+ ParameterNumber
+ undecidedlabel
+ Label for the Undecided class
+ Label for the Undecided class. Pixels with more than 1 fused class are marked as Undecided. Please note that the Undecided value must be different from existing labels in the input classifications. By default, 'undecidedlabel = 0'.
+
+
+ 0
+
+
+ OutputRaster
+ out
+ The output classification image
+ The output classification image resulting from the fusion of the input classification images.
+
+
+
diff --git a/python/plugins/processing/otb/description/FusionOfClassifications.txt b/python/plugins/processing/otb/description/FusionOfClassifications.txt
deleted file mode 100644
index f40519c13014..000000000000
--- a/python/plugins/processing/otb/description/FusionOfClassifications.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Fusion of Classifications
-otbcli_FusionOfClassifications
-Fusion of Classifications
-Learning
-ParameterMultipleInput|-il|Input classifications|-1|False
-ParameterNumber|-undecided|Label for the undecided class|None|None|0
-OutputRaster|-out|Output Image
diff --git a/python/plugins/processing/otb/description/GrayScaleMorphologicalOperation-closing.xml b/python/plugins/processing/otb/description/GrayScaleMorphologicalOperation-closing.xml
new file mode 100644
index 000000000000..00e204da52b0
--- /dev/null
+++ b/python/plugins/processing/otb/description/GrayScaleMorphologicalOperation-closing.xml
@@ -0,0 +1,72 @@
+
+ GrayScaleMorphologicalOperation-closing
+ otbcli_GrayScaleMorphologicalOperation
+ GrayScaleMorphologicalOperation (closing)
+ Feature Extraction
+ Performs morphological operations on a grayscale input image
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to be filtered.
+ False
+
+
+ OutputRaster
+ out
+ Feature Output Image
+ Output image containing the filtered output image.
+
+
+
+ ParameterNumber
+ channel
+ Selected Channel
+ The selected channel index
+
+
+ 1
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterSelection
+ structype
+ Structuring Element Type
+ Choice of the structuring element type
+
+
+ ball
+
+
+ 0
+
+
+ ParameterNumber
+ structype.ball.xradius
+ The Structuring Element Radius
+ The Structuring Element Radius
+
+
+ 5
+
+
+ ParameterSelection
+ filter
+ Morphological Operation
+ Choice of the morphological operation
+
+
+ closing
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/GrayScaleMorphologicalOperation-dilate.xml b/python/plugins/processing/otb/description/GrayScaleMorphologicalOperation-dilate.xml
new file mode 100644
index 000000000000..c811ad2b1191
--- /dev/null
+++ b/python/plugins/processing/otb/description/GrayScaleMorphologicalOperation-dilate.xml
@@ -0,0 +1,72 @@
+
+ GrayScaleMorphologicalOperation-dilate
+ otbcli_GrayScaleMorphologicalOperation
+ GrayScaleMorphologicalOperation (dilate)
+ Feature Extraction
+ Performs morphological operations on a grayscale input image
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to be filtered.
+ False
+
+
+ OutputRaster
+ out
+ Feature Output Image
+ Output image containing the filtered output image.
+
+
+
+ ParameterNumber
+ channel
+ Selected Channel
+ The selected channel index
+
+
+ 1
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterSelection
+ structype
+ Structuring Element Type
+ Choice of the structuring element type
+
+
+ ball
+
+
+ 0
+
+
+ ParameterNumber
+ structype.ball.xradius
+ The Structuring Element Radius
+ The Structuring Element Radius
+
+
+ 5
+
+
+ ParameterSelection
+ filter
+ Morphological Operation
+ Choice of the morphological operation
+
+
+ dilate
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/GrayScaleMorphologicalOperation-erode.xml b/python/plugins/processing/otb/description/GrayScaleMorphologicalOperation-erode.xml
new file mode 100644
index 000000000000..8e3602090f16
--- /dev/null
+++ b/python/plugins/processing/otb/description/GrayScaleMorphologicalOperation-erode.xml
@@ -0,0 +1,72 @@
+
+ GrayScaleMorphologicalOperation-erode
+ otbcli_GrayScaleMorphologicalOperation
+ GrayScaleMorphologicalOperation (erode)
+ Feature Extraction
+ Performs morphological operations on a grayscale input image
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to be filtered.
+ False
+
+
+ OutputRaster
+ out
+ Feature Output Image
+ Output image containing the filtered output image.
+
+
+
+ ParameterNumber
+ channel
+ Selected Channel
+ The selected channel index
+
+
+ 1
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterSelection
+ structype
+ Structuring Element Type
+ Choice of the structuring element type
+
+
+ ball
+
+
+ 0
+
+
+ ParameterNumber
+ structype.ball.xradius
+ The Structuring Element Radius
+ The Structuring Element Radius
+
+
+ 5
+
+
+ ParameterSelection
+ filter
+ Morphological Operation
+ Choice of the morphological operation
+
+
+ erode
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/GrayScaleMorphologicalOperation-opening.xml b/python/plugins/processing/otb/description/GrayScaleMorphologicalOperation-opening.xml
new file mode 100644
index 000000000000..8e50230edd52
--- /dev/null
+++ b/python/plugins/processing/otb/description/GrayScaleMorphologicalOperation-opening.xml
@@ -0,0 +1,72 @@
+
+ GrayScaleMorphologicalOperation-opening
+ otbcli_GrayScaleMorphologicalOperation
+ GrayScaleMorphologicalOperation (opening)
+ Feature Extraction
+ Performs morphological operations on a grayscale input image
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to be filtered.
+ False
+
+
+ OutputRaster
+ out
+ Feature Output Image
+ Output image containing the filtered output image.
+
+
+
+ ParameterNumber
+ channel
+ Selected Channel
+ The selected channel index
+
+
+ 1
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterSelection
+ structype
+ Structuring Element Type
+ Choice of the structuring element type
+
+
+ ball
+
+
+ 0
+
+
+ ParameterNumber
+ structype.ball.xradius
+ The Structuring Element Radius
+ The Structuring Element Radius
+
+
+ 5
+
+
+ ParameterSelection
+ filter
+ Morphological Operation
+ Choice of the morphological operation
+
+
+ opening
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/GrayScaleMorphologicalOperation.txt b/python/plugins/processing/otb/description/GrayScaleMorphologicalOperation.txt
deleted file mode 100644
index 15778b40fb85..000000000000
--- a/python/plugins/processing/otb/description/GrayScaleMorphologicalOperation.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Gray Scale Morphological Operation
-otbcli_GrayScaleMorphologicalOperation
-Gray Scale Morphological Operation
-Image Filtering
-ParameterRaster|-in|Input Image|False
-OutputRaster|-out|Output Image
-ParameterNumber|-channel|Selected Channel|None|None|1
-ParameterSelection|-structype|Structuring Element Type|ball;cross|0
-ParameterNumber|-structype.ball.xradius|The Structuring Element X Radius|None|None|5
-ParameterNumber|-structype.ball.yradius|The Structuring Element Y Radius|None|None|5
-ParameterSelection|-filter|Morphological Operation|dilate;erode;opening;closing|0
diff --git a/python/plugins/processing/otb/description/GridBasedImageResampling.txt b/python/plugins/processing/otb/description/GridBasedImageResampling.txt
deleted file mode 100644
index 34ce865460af..000000000000
--- a/python/plugins/processing/otb/description/GridBasedImageResampling.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-GridBasedImageResampling
-otbcli_GridBasedImageResampling
-Grid Based Image Resampling
-Geometry
-ParameterRaster|-io.in|Input image|False
-OutputRaster|-io.out|Output Image
-ParameterRaster|-grid.in|Input resampling grid|False
-ParameterSelection|-grid.type|Grid Type|def;loc|0
-ParameterNumber|-out.ulx|Upper Left X|None|None|0.0
-ParameterNumber|-out.uly|Upper Left Y|None|None|0.0
-ParameterNumber|-out.sizex|Size X|None|None|0
-ParameterNumber|-out.sizey|Size Y|None|None|0
-ParameterNumber|-out.spacingx|Pixel Size X|None|None|1.0
-ParameterNumber|-out.spacingy|Pixel Size Y|None|None|1.0
-ParameterNumber|-out.default|Default value|None|None|0.0
-ParameterSelection|-interpolator|Interpolation|nn;linear;bco|2
-ParameterNumber|-interpolator.bco.radius|Radius for bicubic interpolation|None|None|2
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
diff --git a/python/plugins/processing/otb/description/HaralickTextureExtraction.txt b/python/plugins/processing/otb/description/HaralickTextureExtraction.txt
deleted file mode 100644
index a1909c622f81..000000000000
--- a/python/plugins/processing/otb/description/HaralickTextureExtraction.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-Haralick Texture Extraction
-otbcli_HaralickTextureExtraction
-Haralick Texture Extraction
-Feature Extraction
-ParameterRaster|-in|Input Image|False
-OutputRaster|-out|Output Image
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
-ParameterNumber|-channel|Raster Band|None|None|1
-ParameterNumber|-parameters.xrad|X Radius|None|None|1
-ParameterNumber|-parameters.yrad|Y Radius|None|None|1
-ParameterNumber|-parameters.xoff|X Offset|None|None|1
-ParameterNumber|-parameters.yoff|Y Offset|None|None|1
-ParameterNumber|-parameters.min|Image Minimum|None|None|1
-ParameterNumber|-parameters.max|Image Maximum|None|None|255
-ParameterNumber|-parameters.nbbin|Histogram number of bin|None|None|8
-ParameterSelection|-texture|Texture Set Selection|simple;advanced;higher|0
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/HaralickTextureExtraction.xml b/python/plugins/processing/otb/description/HaralickTextureExtraction.xml
new file mode 100644
index 000000000000..9ce1ceac932b
--- /dev/null
+++ b/python/plugins/processing/otb/description/HaralickTextureExtraction.xml
@@ -0,0 +1,116 @@
+
+ HaralickTextureExtraction
+ otbcli_HaralickTextureExtraction
+ Haralick Texture Extraction
+ Feature Extraction
+ Computes textures on every pixel of the input image selected channel
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to compute the features on.
+ False
+
+
+ ParameterNumber
+ channel
+ Selected Channel
+ The selected channel index
+
+
+ 1
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterNumber
+ parameters.xrad
+ X Radius
+ X Radius
+
+
+ 2
+
+
+ ParameterNumber
+ parameters.yrad
+ Y Radius
+ Y Radius
+
+
+ 2
+
+
+ ParameterNumber
+ parameters.xoff
+ X Offset
+ X Offset
+
+
+ 1
+
+
+ ParameterNumber
+ parameters.yoff
+ Y Offset
+ Y Offset
+
+
+ 1
+
+
+ ParameterNumber
+ parameters.min
+ Image Minimum
+ Image Minimum
+
+
+ 0
+
+
+ ParameterNumber
+ parameters.max
+ Image Maximum
+ Image Maximum
+
+
+ 255
+
+
+ ParameterNumber
+ parameters.nbbin
+ Histogram number of bin
+ Histogram number of bin
+
+
+ 8
+
+
+ ParameterSelection
+ texture
+ Texture Set Selection
+ Choice of The Texture Set
+
+
+ simple
+ advanced
+ higher
+
+
+ 0
+
+
+ OutputRaster
+ out
+ Output Image
+ Output image containing the selected texture features.
+
+
+
diff --git a/python/plugins/processing/otb/description/HooverCompareSegmentation.txt b/python/plugins/processing/otb/description/HooverCompareSegmentation.txt
deleted file mode 100644
index af1d02f8bea5..000000000000
--- a/python/plugins/processing/otb/description/HooverCompareSegmentation.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-HooverCompareSegmentation
-otbcli_HooverCompareSegmentation
-Hoover compare segmentation
-Segmentation
-ParameterRaster|-ingt|Input ground truth|False
-ParameterRaster|-inms|Input machine segmentation|False
-ParameterNumber|-bg|Background label|None|None|0
-ParameterNumber|-th|Overlapping threshold|None|None|0.75
-OutputRaster|-outgt|Colored ground truth output
-OutputRaster|-outms|Colored machine segmentation output
-ParameterNumber|-rc|Correct detection score|None|None|0.0
-ParameterNumber|-rf|Over-segmentation score|None|None|0.0
-ParameterNumber|-ra|Under-segmentation score|None|None|0.0
-ParameterNumber|-rm|Missed detection score|None|None|0.0
diff --git a/python/plugins/processing/otb/description/HooverCompareSegmentation.xml b/python/plugins/processing/otb/description/HooverCompareSegmentation.xml
new file mode 100644
index 000000000000..7c772ecbc39f
--- /dev/null
+++ b/python/plugins/processing/otb/description/HooverCompareSegmentation.xml
@@ -0,0 +1,89 @@
+
+ HooverCompareSegmentation
+ otbcli_HooverCompareSegmentation
+ Hoover compare segmentation
+ Segmentation
+ Compare two segmentations with Hoover metrics
+
+ ParameterRaster
+ ingt
+ Input ground truth
+ A partial ground truth segmentation image.
+ False
+
+
+ ParameterRaster
+ inms
+ Input machine segmentation
+ A machine segmentation image.
+ False
+
+
+ ParameterNumber
+ bg
+ Background label
+ Label value of the background in the input segmentations
+
+
+ 0
+
+
+ ParameterNumber
+ th
+ Overlapping threshold
+ Overlapping threshold used to find Hoover instances.
+
+
+ 0.75
+
+
+ OutputRaster
+ outgt
+ Colored ground truth output
+ The colored ground truth output image.
+
+
+
+ OutputRaster
+ outms
+ Colored machine segmentation output
+ The colored machine segmentation output image.
+
+
+
+ ParameterNumber
+ rc
+ Correct detection score
+ Overall score for correct detection (RC)
+
+
+ 0.0
+
+
+ ParameterNumber
+ rf
+ Over-segmentation score
+ Overall score for over segmentation (RF)
+
+
+ 0.0
+
+
+ ParameterNumber
+ ra
+ Under-segmentation score
+ Overall score for under segmentation (RA)
+
+
+ 0.0
+
+
+ ParameterNumber
+ rm
+ Missed detection score
+ Overall score for missed detection (RM)
+
+
+ 0.0
+
+
diff --git a/python/plugins/processing/otb/description/HyperspectralUnmixing.txt b/python/plugins/processing/otb/description/HyperspectralUnmixing.txt
deleted file mode 100644
index dd9fd59a877f..000000000000
--- a/python/plugins/processing/otb/description/HyperspectralUnmixing.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-HyperspectralUnmixing
-otbcli_HyperspectralUnmixing
-Hyperspectral data unmixing
-Miscellaneous
-ParameterRaster|-in|Input Image Filename|False
-OutputRaster|-out|Output Image
-ParameterRaster|-ie|Input endmembers|False
-ParameterSelection|-ua|Unmixing algorithm|ucls;ncls;isra;mdmdnmf|0
diff --git a/python/plugins/processing/otb/description/ImageClassifier.xml b/python/plugins/processing/otb/description/ImageClassifier.xml
new file mode 100644
index 000000000000..4f799e5e299a
--- /dev/null
+++ b/python/plugins/processing/otb/description/ImageClassifier.xml
@@ -0,0 +1,53 @@
+
+ ImageClassifier
+ otbcli_ImageClassifier
+ Image Classification
+ Learning
+ Performs a classification of the input image according to a model file.
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to classify.
+ False
+
+
+ ParameterRaster
+ mask
+ Input Mask
+ The mask allows restricting classification of the input image to the area where mask pixel values are greater than 0.
+ True
+
+
+ ParameterFile
+ model
+ Model file
+ A model file (produced by TrainImagesClassifier application, maximal class label = 65535).
+
+ False
+
+
+ ParameterFile
+ imstat
+ Statistics file
+ A XML file containing mean and standard deviation to center and reduce samples before classification (produced by ComputeImagesStatistics application).
+
+ True
+
+
+ OutputRaster
+ out
+ Output Image
+ Output image containing class labels
+
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
diff --git a/python/plugins/processing/otb/description/ImageEnvelope.txt b/python/plugins/processing/otb/description/ImageEnvelope.txt
deleted file mode 100644
index a4b1ada7079b..000000000000
--- a/python/plugins/processing/otb/description/ImageEnvelope.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-ImageEnvelope
-otbcli_ImageEnvelope
-Image Envelope
-Geometry
-ParameterRaster|-in|Input Image|False
-OutputVector|-out|Output Vector Data
-ParameterNumber|-sr|Sampling Rate|None|None|0
-ParameterSelection|-elev|Elevation management|dem;average|1
-ParameterFile|-elev.dem.path|DEM directory|
-ParameterFile|-elev.dem.geoid|Geoid File||
-ParameterNumber|-elev.average.value|Average Elevation|None|None|0.0
-ParameterString|-proj|Projection|
diff --git a/python/plugins/processing/otb/description/ImageEnvelope.xml b/python/plugins/processing/otb/description/ImageEnvelope.xml
new file mode 100644
index 000000000000..66d4e3ecc61c
--- /dev/null
+++ b/python/plugins/processing/otb/description/ImageEnvelope.xml
@@ -0,0 +1,39 @@
+
+ ImageEnvelope
+ otbcli_ImageEnvelope
+ Image Envelope
+ Geometry
+ Extracts an image envelope.
+
+ ParameterRaster
+ in
+ Input Image
+ Input image.
+ False
+
+
+ OutputVector
+ out
+ Output Vector Data
+ Vector data file containing the envelope
+
+
+
+ ParameterNumber
+ sr
+ Sampling Rate
+ Sampling rate for image edges (in pixel)
+
+
+ 0
+
+
+ ParameterString
+ proj
+ Projection
+ Projection to be used to compute the envelope (default is WGS84)
+
+
+ True
+
+
diff --git a/python/plugins/processing/otb/description/ImageSVMClassifier.txt b/python/plugins/processing/otb/description/ImageSVMClassifier.txt
deleted file mode 100644
index 186ca2647025..000000000000
--- a/python/plugins/processing/otb/description/ImageSVMClassifier.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-ImageSVMClassifier
-otbcli_ImageSVMClassifier
-Image SVM Classification
-Learning
-ParameterRaster|-in|Input Image|False
-ParameterRaster|-mask|Input Mask|True
-ParameterFile|-svm|SVM Model file||
-ParameterFile|-imstat|Statistics file|
-OutputRaster|-out|Output Image
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
diff --git a/python/plugins/processing/otb/description/KMeansClassification.txt b/python/plugins/processing/otb/description/KMeansClassification.txt
deleted file mode 100644
index b44cea5d5744..000000000000
--- a/python/plugins/processing/otb/description/KMeansClassification.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-KMeansClassification
-otbcli_KMeansClassification
-Unsupervised KMeans image classification
-Learning
-ParameterRaster|-in|Input Image|False
-OutputRaster|-out|Output Image
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
-ParameterRaster|-vm|Validity Mask|True
-ParameterNumber|-ts|Training set size|None|None|100
-ParameterNumber|-nc|Number of classes|None|None|5
-ParameterNumber|-maxit|Maximum number of iterations|None|None|1000
-ParameterNumber|-ct|Convergence threshold|None|None|9.99999974738e-05
-OutputFile|-outmeans|Centroid filename
-ParameterNumber|-rand|set user defined seed|None|None|0
diff --git a/python/plugins/processing/otb/description/KMeansClassification.xml b/python/plugins/processing/otb/description/KMeansClassification.xml
new file mode 100644
index 000000000000..7c2d15b71388
--- /dev/null
+++ b/python/plugins/processing/otb/description/KMeansClassification.xml
@@ -0,0 +1,80 @@
+
+ KMeansClassification
+ otbcli_KMeansClassification
+ Unsupervised KMeans image classification
+ Learning
+ Unsupervised KMeans image classification
+
+ ParameterRaster
+ in
+ Input Image
+ Input image to classify.
+ False
+
+
+ OutputRaster
+ out
+ Output Image
+ Output image containing the class indexes.
+
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterRaster
+ vm
+ Validity Mask
+ Validity mask. Only non-zero pixels will be used to estimate KMeans modes.
+ True
+
+
+ ParameterNumber
+ ts
+ Training set size
+ Size of the training set (in pixels).
+
+
+ 100
+
+
+ ParameterNumber
+ nc
+ Number of classes
+ Number of modes, which will be used to generate class membership.
+
+
+ 5
+
+
+ ParameterNumber
+ maxit
+ Maximum number of iterations
+ Maximum number of iterations for the learning step.
+
+
+ 1000
+
+
+ ParameterNumber
+ ct
+ Convergence threshold
+ Convergence threshold for class centroid (L2 distance, by default 0.0001).
+
+
+ 0.0001
+
+
+ OutputFile
+ outmeans
+ Centroid filename
+ Output text file containing centroid positions
+
+
+
diff --git a/python/plugins/processing/otb/description/KmzExport.txt b/python/plugins/processing/otb/description/KmzExport.txt
deleted file mode 100644
index f081a440a6dc..000000000000
--- a/python/plugins/processing/otb/description/KmzExport.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-KmzExport
-otbcli_KmzExport
-Image to KMZ Export
-Miscellaneous
-ParameterRaster|-in|Input image|False
-OutputFile|-out|Output .kmz product
-ParameterNumber|-tilesize|Tile Size|None|None|256
-ParameterRaster|-logo|Image logo|True
-ParameterRaster|-legend|Image legend|True
-ParameterSelection|-elev|Elevation management|dem;average|1
-ParameterString|-elev.dem|DEM directory|True
-ParameterFile|-elev.geoid|Geoid File|
-ParameterNumber|-elev.default|Average Elevation|None|None|0.0
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/KmzExport.xml b/python/plugins/processing/otb/description/KmzExport.xml
new file mode 100644
index 000000000000..9eef07197da9
--- /dev/null
+++ b/python/plugins/processing/otb/description/KmzExport.xml
@@ -0,0 +1,53 @@
+
+ KmzExport
+ otbcli_KmzExport
+ Image to KMZ Export
+ Miscellaneous
+ Export the input image in a KMZ product.
+
+ ParameterRaster
+ in
+ Input image
+ Input image
+ False
+
+
+ OutputFile
+ out
+ Output .kmz product
+ Output Kmz product directory (with .kmz extension)
+
+
+
+ ParameterNumber
+ tilesize
+ Tile Size
+ Size of the tiles in the kmz product, in number of pixels (default = 512).
+
+
+ 512
+
+
+ ParameterRaster
+ logo
+ Image logo
+ Path to the image logo to add to the KMZ product.
+ True
+
+
+ ParameterRaster
+ legend
+ Image legend
+ Path to the image legend to add to the KMZ product.
+ True
+
+
+ ParameterNumber
+ elev.default
+ Default elevation
+ This parameter allows setting the default height above ellipsoid when there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles, and no geoid file has been set. This is also used by some applications as an average elevation value.
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/LSMSSegmentation.xml b/python/plugins/processing/otb/description/LSMSSegmentation.xml
new file mode 100644
index 000000000000..14449f66bb82
--- /dev/null
+++ b/python/plugins/processing/otb/description/LSMSSegmentation.xml
@@ -0,0 +1,88 @@
+
+ LSMSSegmentation
+ otbcli_LSMSSegmentation
+ Exact Large-Scale Mean-Shift segmentation, step 2
+ Segmentation
+ Second step of the exact Large-Scale Mean-Shift segmentation workflow.
+
+ ParameterRaster
+ in
+ Filtered image
+ The filtered image (cf. Adaptive MeanShift Smoothing application).
+ False
+
+
+ ParameterRaster
+ inpos
+ Spatial image
+ The spatial image. Spatial input is the displacement map (output of the Adaptive MeanShift Smoothing application).
+ True
+
+
+ OutputRaster
+ out
+ Output Image
+ The output image. The output image is the segmentation of the filtered image. It is recommended to set the pixel type to uint32.
+
+
+
+ ParameterNumber
+ ranger
+ Range radius
+ Range radius defining the radius (expressed in radiometry unit) in the multi-spectral space.
+
+
+ 15
+
+
+ ParameterNumber
+ spatialr
+ Spatial radius
+ Spatial radius of the neighborhood.
+
+
+ 5
+
+
+ ParameterNumber
+ minsize
+ Minimum Region Size
+ Minimum Region Size. If, after the segmentation, a region is of size lower than this criterion, the region is deleted.
+
+
+ 0
+
+
+ ParameterNumber
+ tilesizex
+ Size of tiles in pixel (X-axis)
+ Size of tiles along the X-axis.
+
+
+ 500
+
+
+ ParameterNumber
+ tilesizey
+ Size of tiles in pixel (Y-axis)
+ Size of tiles along the Y-axis.
+
+
+ 500
+
+
+ ParameterFile
+ tmpdir
+ Directory where to write temporary files
+ This application needs to write temporary files for each tile. This parameter allows choosing the path where to write those files. If disabled, the current path will be used.
+
+ True
+
+
+ ParameterBoolean
+ cleanup
+ Temporary files cleaning
+ If activated, the application will try to clean all temporary files it created
+ True
+
+
diff --git a/python/plugins/processing/otb/description/LSMSSmallRegionsMerging.xml b/python/plugins/processing/otb/description/LSMSSmallRegionsMerging.xml
new file mode 100644
index 000000000000..9cb0621e80c3
--- /dev/null
+++ b/python/plugins/processing/otb/description/LSMSSmallRegionsMerging.xml
@@ -0,0 +1,55 @@
+
+ LSMSSmallRegionsMerging
+ otbcli_LSMSSmallRegionsMerging
+ Exact Large-Scale Mean-Shift segmentation, step 3 (optional)
+ Segmentation
+ Third (optional) step of the exact Large-Scale Mean-Shift segmentation workflow.
+
+ ParameterRaster
+ in
+ Input image
+ The input image.
+ False
+
+
+ ParameterRaster
+ inseg
+ Segmented image
+ The segmented image input. Segmented image input is the segmentation of the input image.
+ False
+
+
+ OutputRaster
+ out
+ Output Image
+ The output image. The output image is the input image where the minimal regions have been merged.
+
+
+
+ ParameterNumber
+ minsize
+ Minimum Region Size
+ Minimum Region Size. If, after the segmentation, a region is of size lower than this criterion, the region is merged with the "nearest" region (radiometrically).
+
+
+ 50
+
+
+ ParameterNumber
+ tilesizex
+ Size of tiles in pixel (X-axis)
+ Size of tiles along the X-axis.
+
+
+ 500
+
+
+ ParameterNumber
+ tilesizey
+ Size of tiles in pixel (Y-axis)
+ Size of tiles along the Y-axis.
+
+
+ 500
+
+
diff --git a/python/plugins/processing/otb/description/LSMSVectorization.xml b/python/plugins/processing/otb/description/LSMSVectorization.xml
new file mode 100644
index 000000000000..2d908bddb4d4
--- /dev/null
+++ b/python/plugins/processing/otb/description/LSMSVectorization.xml
@@ -0,0 +1,46 @@
+
+ LSMSVectorization
+ otbcli_LSMSVectorization
+ Exact Large-Scale Mean-Shift segmentation, step 4
+ Segmentation
+ Fourth step of the exact Large-Scale Mean-Shift segmentation workflow.
+
+ ParameterRaster
+ in
+ Input Image
+ The input image.
+ False
+
+
+ ParameterRaster
+ inseg
+ Segmented image
+ The segmented image input. Segmented image input is the segmentation of the input image.
+ False
+
+
+ OutputVector
+ out
+ Output GIS vector file
+ The output GIS vector file, representing the vectorized version of the segmented image where the features of the polygons are the radiometric means and variances.
+
+
+
+ ParameterNumber
+ tilesizex
+ Size of tiles in pixel (X-axis)
+ Size of tiles along the X-axis.
+
+
+ 500
+
+
+ ParameterNumber
+ tilesizey
+ Size of tiles in pixel (Y-axis)
+ Size of tiles along the Y-axis.
+
+
+ 500
+
+
diff --git a/python/plugins/processing/otb/description/LineSegmentDetection.txt b/python/plugins/processing/otb/description/LineSegmentDetection.txt
deleted file mode 100644
index 1354eba52d2d..000000000000
--- a/python/plugins/processing/otb/description/LineSegmentDetection.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-LineSegmentDetection
-otbcli_LineSegmentDetection
-Line segment detection
-Feature Extraction
-ParameterRaster|-in|Input Image|False
-OutputVector|-out|Output Detected lines
-ParameterSelection|-elev|Elevation management|dem;average|1
-ParameterFile|-elev.dem.path|DEM directory|
-ParameterFile|-elev.dem.geoid|Geoid File||
-ParameterNumber|-elev.average.value|Average Elevation|None|None|0.0
-ParameterBoolean|-norescale|No rescaling in [0, 255]|
diff --git a/python/plugins/processing/otb/description/LineSegmentDetection.xml b/python/plugins/processing/otb/description/LineSegmentDetection.xml
new file mode 100644
index 000000000000..20822d5dc351
--- /dev/null
+++ b/python/plugins/processing/otb/description/LineSegmentDetection.xml
@@ -0,0 +1,28 @@
+
+ LineSegmentDetection
+ otbcli_LineSegmentDetection
+ Line segment detection
+ Feature Extraction
+ Detect line segments in raster
+
+ ParameterRaster
+ in
+ Input Image
+ Input image on which lines will be detected.
+ False
+
+
+ OutputVector
+ out
+ Output Detected lines
+ Output detected line segments (vector data).
+
+
+
+ ParameterBoolean
+ norescale
+ No rescaling in [0, 255]
+ By default, the input image amplitude is rescaled between [0,255]. Turn on this parameter to skip rescaling
+ True
+
+
diff --git a/python/plugins/processing/otb/description/LocalStatisticExtraction.txt b/python/plugins/processing/otb/description/LocalStatisticExtraction.txt
deleted file mode 100644
index eeb039cfd4bd..000000000000
--- a/python/plugins/processing/otb/description/LocalStatisticExtraction.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-Local Statistic Extraction
-otbcli_LocalStatisticExtraction
-Local Statistic Extraction
-Feature Extraction
-ParameterRaster|-in|Input Image|False
-OutputRaster|-out|Output Image
-ParameterNumber|-channel|Selected Channel|None|None|1
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
-ParameterNumber|-radius|Neighborhood radius|None|None|3
diff --git a/python/plugins/processing/otb/description/LocalStatisticExtraction.xml b/python/plugins/processing/otb/description/LocalStatisticExtraction.xml
new file mode 100644
index 000000000000..f94caa6580fc
--- /dev/null
+++ b/python/plugins/processing/otb/description/LocalStatisticExtraction.xml
@@ -0,0 +1,48 @@
+
+ LocalStatisticExtraction
+ otbcli_LocalStatisticExtraction
+ Local Statistic Extraction
+ Feature Extraction
+ Computes local statistical moments on every pixel in the selected channel of the input image
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to compute the features on.
+ False
+
+
+ ParameterNumber
+ channel
+ Selected Channel
+ The selected channel index
+
+
+ 1
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterNumber
+ radius
+ Neighborhood radius
+ The computational window radius.
+
+
+ 3
+
+
+ OutputRaster
+ out
+ Feature Output Image
+ Output image containing the local statistical moments.
+
+
+
diff --git a/python/plugins/processing/otb/description/MaximumAutocorrelationFactor.txt b/python/plugins/processing/otb/description/MaximumAutocorrelationFactor.txt
deleted file mode 100644
index 32b047e6a66e..000000000000
--- a/python/plugins/processing/otb/description/MaximumAutocorrelationFactor.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-MaximumAutocorrelationFactor
-otbcli_MaximumAutocorrelationFactor
-Maximum Auto-correlation Factor Decomposition
-Image Filtering
-ParameterRaster|-in|Input Image|False
-OutputRaster|-out|MAF output
diff --git a/python/plugins/processing/otb/description/MeanShiftSegmentation.txt b/python/plugins/processing/otb/description/MeanShiftSegmentation.txt
deleted file mode 100644
index a8ad1cdcc95d..000000000000
--- a/python/plugins/processing/otb/description/MeanShiftSegmentation.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-MeanShiftSegmentation
-otbcli_Segmentation
-Mean Shift segmentation (labeled raster output)
-Segmentation
-ParameterRaster|-filter meanshift -in|Input Image|False
-ParameterNumber|-filter.meanshift.spatialr|Spatial radius|None|None|5
-ParameterNumber|-filter.meanshift.thres|Mode convergence threshold|None|None|0.1
-ParameterNumber|-filter.meanshift.ranger|Range radius|None|None|15.0
-ParameterNumber|-filter.meanshift.minsize|Min region size|0|None|100
-ParameterNumber|-filter.meanshift.maxiter|Maximum number of iterations|0|None|100
-OutputRaster|-mode raster -mode.raster.out|Output labeled image
-
diff --git a/python/plugins/processing/otb/description/MeanShiftSegmentation_vector.txt b/python/plugins/processing/otb/description/MeanShiftSegmentation_vector.txt
deleted file mode 100644
index 575c74741518..000000000000
--- a/python/plugins/processing/otb/description/MeanShiftSegmentation_vector.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-MeanShiftSegmentationVector
-otbcli_Segmentation
-Mean Shift segmentation (large-scale, vector output)
-Segmentation
-ParameterRaster|-filter meanshift -in|Input Image|False
-ParameterNumber|-filter.meanshift.spatialr|Spatial radius|None|None|5
-ParameterNumber|-filter.meanshift.thres|Mode convergence threshold|None|None|0.1
-ParameterNumber|-filter.meanshift.ranger|Range radius|None|None|15.0
-ParameterNumber|-filter.meanshift.minsize|Min region size|0|None|100
-ParameterNumber|-filter.meanshift.maxiter|Maximum number of iterations|0|None|100
-OutputVector|-mode vector -mode.vector.out|Output vector file
-ParameterVector|-mode.vector.inmask|Mask Image|-1|True
-ParameterBoolean|-mode.vector.neighbor|8-neighbor connectivity|False
-ParameterBoolean|-mode.vector.stitch|Stitch polygons|True
-ParameterNumber|-mode.vector.minsize|Minimum object size|1|None|1
-ParameterNumber|-mode.vector.simplify|Simplify polygons|None|None|0.0
-ParameterString|-mode.vector.layername|Layer name |layer
-ParemeterString|-mode.vector.fieldname|Geometry index field name|DN
-ParameterNumber|-mode.vector.tilesize|Tile size|0|None|1024
-ParameterNumber|-mode.vector.startlabel|Starting geometry index|1|None|1
-ParameterSelection|-mode.vector.outmode|Writing mode (update file/overwrite file/overwrite layer/update layer)|ulco;ovw;ulovw;ulu|0
-ParameterString|-mode.vector.ogroptions|OGR options for layer creation|
diff --git a/python/plugins/processing/otb/description/MeanShiftSmoothing.txt b/python/plugins/processing/otb/description/MeanShiftSmoothing.txt
deleted file mode 100644
index dce783d133e4..000000000000
--- a/python/plugins/processing/otb/description/MeanShiftSmoothing.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-MeanShiftSmoothing
-otbcli_MeanShiftSmoothing
-Mean Shift filtering
-Image Filtering
-ParameterRaster|-in|Input Image|False
-OutputRaster|-fout|Filtered output
-ParameterNumber|-spatialr|Spatial radius|None|None|5
-ParameterNumber|-ranger|Range radius|None|None|15.0
-ParameterNumber|-thres|Mode convergence threshold|None|None|0.10000000149
-ParameterNumber|-maxiter|Maximum number of iterations|None|None|100
-ParameterBoolean|-modesearch|Mode search.|True
diff --git a/python/plugins/processing/otb/description/MeanShiftSmoothing.xml b/python/plugins/processing/otb/description/MeanShiftSmoothing.xml
new file mode 100644
index 000000000000..d0253d84d00f
--- /dev/null
+++ b/python/plugins/processing/otb/description/MeanShiftSmoothing.xml
@@ -0,0 +1,80 @@
+
+ MeanShiftSmoothing
+ otbcli_MeanShiftSmoothing
+ Mean Shift filtering (can be used as Exact Large-Scale Mean-Shift segmentation, step 1)
+ Image Filtering
+ Perform mean shift filtering
+
+ ParameterRaster
+ in
+ Input Image
+ The input image.
+ False
+
+
+ OutputRaster
+ fout
+ Filtered output
+ The filtered output image.
+
+
+
+ OutputRaster
+ foutpos
+ Spatial image
+ The spatial image output. Spatial image output is a displacement map (pixel position after convergence).
+
+
+
+ ParameterNumber
+ spatialr
+ Spatial radius
+ Spatial radius of the neighborhood.
+
+
+ 5
+
+
+ ParameterNumber
+ ranger
+ Range radius
+ Range radius defining the radius (expressed in radiometry unit) in the multi-spectral space.
+
+
+ 15
+
+
+ ParameterNumber
+ thres
+ Mode convergence threshold
+ Algorithm iterative scheme will stop if mean-shift vector is below this threshold or if iteration number reached maximum number of iterations.
+
+
+ 0.1
+
+
+ ParameterNumber
+ maxiter
+ Maximum number of iterations
+ Algorithm iterative scheme will stop if convergence hasn't been reached after the maximum number of iterations.
+
+
+ 100
+
+
+ ParameterNumber
+ rangeramp
+ Range radius coefficient
+ This coefficient makes dependent the ranger of the colorimetry of the filtered pixel : y = rangeramp*x+ranger.
+
+
+ 0
+
+
+ ParameterBoolean
+ modesearch
+ Mode search.
+ If activated, pixel iterative convergence is stopped if the path crosses an already converged pixel. Be careful, with this option, the result will slightly depend on thread number
+ True
+
+
diff --git a/python/plugins/processing/otb/description/MorphologicalProfilesSegmentation_raster.txt b/python/plugins/processing/otb/description/MorphologicalProfilesSegmentation_raster.txt
deleted file mode 100644
index 9ca8a9be4eb8..000000000000
--- a/python/plugins/processing/otb/description/MorphologicalProfilesSegmentation_raster.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-MorphologicalProfilesSegmentationRaster
-otbcli_Segmentation
-Morphological Profiles Based Segmentation (labeled raster output)
-Segmentation
-ParameterRaster|-filter mprofiles -in|Input Image|False
-ParameterNumber|-filter.mprofiles.size|Profile Size|None|None|5
-ParameterNumber|-filter.mprofiles.start|Initial Radius|None|None|1
-ParameterNumber|-filter.mprofiles.step|Radius Step|None|None|1
-ParameterNumber|-filter.mprofiles.sigma|Threshold of final decision rule|0|None|1
-OutputRaster|-mode raster -mode.raster.out|Output labeled image
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/MorphologicalProfilesSegmentation_vector.txt b/python/plugins/processing/otb/description/MorphologicalProfilesSegmentation_vector.txt
deleted file mode 100644
index 555b4d778a80..000000000000
--- a/python/plugins/processing/otb/description/MorphologicalProfilesSegmentation_vector.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-MorphologicalProfilesSegmentationVector
-otbcli_Segmentation
-Morphological Profiles Based Segmentation (large-scale, vector output)
-Segmentation
-ParameterRaster|-filter mprofiles -in|Input Image|False
-ParameterNumber|-filter.mprofiles.size|Profile Size|None|None|5
-ParameterNumber|-filter.mprofiles.start|Initial Radius|None|None|1
-ParameterNumber|-filter.mprofiles.step|Radius Step|None|None|1
-ParameterNumber|-filter.mprofiles.sigma|Threshold of final decision rule|0|None|1
-ParameterBoolean|-mode.vector.neighbor|8-neighbor connectivity|False
-ParameterBoolean|-mode.vector.stitch|Stitch polygons|True
-ParameterNumber|-mode.vector.minsize|Minimum object size|1|None|1
-ParameterNumber|-mode.vector.simplify|Simplify polygons|None|None|0.0
-ParameterString|-mode.vector.layername|Layer name|layer
-ParemeterString|-mode.vector.fieldname|Geometry index field name|DN
-ParameterNumber|-mode.vector.tilesize|Tile size|0|None|1024
-ParameterNumber|-mode.vector.startlabel|Starting geometry index|1|None|1
-ParameterSelection|-mode.vector.outmode|Writing mode (update file/overwrite file/overwrite layer/update layer)|ulco;ovw;ulovw;ulu|0
-ParameterString|-mode.vector.ogroptions|OGR options for layer creation|
-ParameterVector|-mode.vector.inmask|Mask Image|-1|True
-OutputVector|-mode vector -mode.vector.out|Output vector file
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/MultiResolutionPyramid.txt b/python/plugins/processing/otb/description/MultiResolutionPyramid.txt
deleted file mode 100644
index de7bb3322e60..000000000000
--- a/python/plugins/processing/otb/description/MultiResolutionPyramid.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-MultiResolutionPyramid
-otbcli_MultiResolutionPyramid
-Multi Resolution Pyramid
-Image Manipulation
-ParameterRaster|-in|Input Image|False
-OutputFile|-out|Output Image
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
-ParameterNumber|-level|Number Of Levels|None|None|1
-ParameterNumber|-sfactor|Subsampling factor|None|None|2
-ParameterNumber|-vfactor|Variance factor|None|None|0.600000023842
-ParameterBoolean|-fast|Use Fast Scheme|
diff --git a/python/plugins/processing/otb/description/MultivariateAlterationDetector.txt b/python/plugins/processing/otb/description/MultivariateAlterationDetector.txt
deleted file mode 100644
index 026015877dd9..000000000000
--- a/python/plugins/processing/otb/description/MultivariateAlterationDetector.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-MultivariateAlterationDetector
-otbcli_MultivariateAlterationDetector
-Multivariate alteration detector
-Feature Extraction
-ParameterRaster|-in1|Input Image 1|False
-ParameterRaster|-in2|Input Image 2|False
-OutputRaster|-out|Change Map
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
diff --git a/python/plugins/processing/otb/description/MultivariateAlterationDetector.xml b/python/plugins/processing/otb/description/MultivariateAlterationDetector.xml
new file mode 100644
index 000000000000..ec433281f34b
--- /dev/null
+++ b/python/plugins/processing/otb/description/MultivariateAlterationDetector.xml
@@ -0,0 +1,37 @@
+
+ MultivariateAlterationDetector
+ otbcli_MultivariateAlterationDetector
+ Multivariate alteration detector
+ Feature Extraction
+ Multivariate Alteration Detector
+
+ ParameterRaster
+ in1
+ Input Image 1
+ Image which describe initial state of the scene.
+ False
+
+
+ ParameterRaster
+ in2
+ Input Image 2
+ Image which describe scene after perturbations.
+ False
+
+
+ OutputRaster
+ out
+ Change Map
+ Image of detected changes.
+
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
diff --git a/python/plugins/processing/otb/description/OSMDownloader.txt b/python/plugins/processing/otb/description/OSMDownloader.txt
deleted file mode 100644
index 9aa25e109875..000000000000
--- a/python/plugins/processing/otb/description/OSMDownloader.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-OSMDownloader
-otbcli_OSMDownloader
-Open Street Map layers importations applications
-Miscellaneous
-OutputVector|-out|Output vector data
-ParameterRaster|-support|Support image|False
-ParameterString|-key|OSM tag key|
-ParameterString|-value|OSM tag value|
-ParameterSelection|-elev|Elevation management|dem;average|1
-ParameterFile|-elev.dem.path|DEM directory|
-ParameterFile|-elev.dem.geoid|Geoid File||
-ParameterNumber|-elev.average.value|Average Elevation|None|None|0.0
-ParameterBoolean|-printclasses|option to display available key/value classes|
diff --git a/python/plugins/processing/otb/description/ObtainUTMZoneFromGeoPoint.txt b/python/plugins/processing/otb/description/ObtainUTMZoneFromGeoPoint.txt
deleted file mode 100644
index e5aab204c02b..000000000000
--- a/python/plugins/processing/otb/description/ObtainUTMZoneFromGeoPoint.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-ObtainUTMZoneFromGeoPoint
-otbcli_ObtainUTMZoneFromGeoPoint
-Obtain UTM Zone From Geo Point
-Miscellaneous
-ParameterNumber|-lat|Latitude|None|None|0.0
-ParameterNumber|-lon|Longitude|None|None|0.0
-ParameterNumber|-utm|UTMZone|None|None|0
diff --git a/python/plugins/processing/otb/description/OpticalCalibration.txt b/python/plugins/processing/otb/description/OpticalCalibration.txt
deleted file mode 100644
index 809fdf14656a..000000000000
--- a/python/plugins/processing/otb/description/OpticalCalibration.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-OpticalCalibration
-otbcli_OpticalCalibration
-Optical calibration
-Calibration
-ParameterRaster|-in|Input|False
-OutputRaster|-out|Output
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
-ParameterSelection|-level|Calibration Level|toa;toc|0
-ParameterBoolean|-milli|Convert to milli reflectance|
-ParameterFile|-rsr|Relative Spectral Response File||
-ParameterSelection|-atmo.aerosol|Aerosol Model|noaersol;continental;maritime;urban;desertic|0
-ParameterNumber|-atmo.oz|Ozone Amount|None|None|0.0
-ParameterNumber|-atmo.wa|Water Vapor Amount|None|None|2.5
-ParameterNumber|-atmo.pressure|Atmospheric Pressure|None|None|1030.0
-ParameterNumber|-atmo.opt|Aerosol Optical Thickness|None|None|0.20000000298
-ParameterFile|-atmo.aeronet|Aeronet File||
-ParameterNumber|-radius|Window radius|None|None|2
diff --git a/python/plugins/processing/otb/description/OpticalCalibration.xml b/python/plugins/processing/otb/description/OpticalCalibration.xml
new file mode 100644
index 000000000000..0d26f45ae140
--- /dev/null
+++ b/python/plugins/processing/otb/description/OpticalCalibration.xml
@@ -0,0 +1,66 @@
+
+ OpticalCalibration
+ otbcli_OpticalCalibration
+ Optical calibration
+ Calibration
+ Perform optical calibration TOA/TOC (Top Of Atmosphere/Top Of Canopy). Supported sensors: QuickBird, Ikonos, WorldView2, Formosat, Spot5, Pleiades
+
+ ParameterRaster
+ in
+ Input
+ Input image filename (values in DN)
+ False
+
+
+ OutputRaster
+ out
+ Output
+ Output calibrated image filename
+
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterSelection
+ level
+ Calibration Level
+
+
+
+ toa
+
+
+ 0
+
+
+ ParameterBoolean
+ milli
+ Convert to milli reflectance
+ Flag to use milli-reflectance instead of reflectance.
+This allows saving the image with an integer pixel type (in the range [0, 1000] instead of floating point in the range [0, 1]). In order to do that, use this option and set the output pixel type (-out filename uint16 for example)
+ True
+
+
+ ParameterBoolean
+ clamp
+ Clamp of reflectivity values between [0, 100]
+ Clamping in the range [0, 100]. It can be useful to preserve area with specular reflectance.
+ True
+
+
+ ParameterFile
+ rsr
+ Relative Spectral Response File
+ Sensor relative spectral response file
+By default, the application gets this information from the metadata
+
+ True
+
+
diff --git a/python/plugins/processing/otb/description/OrthoRectification-epsg.xml b/python/plugins/processing/otb/description/OrthoRectification-epsg.xml
new file mode 100644
index 000000000000..15b225cb376e
--- /dev/null
+++ b/python/plugins/processing/otb/description/OrthoRectification-epsg.xml
@@ -0,0 +1,115 @@
+
+ OrthoRectification-epsg
+ otbcli_OrthoRectification
+ OrthoRectification (epsg)
+ Geometry
+ This application allows to ortho-rectify optical images from supported sensors.
+
+
+ ParameterRaster
+ io.in
+ Input Image
+ The input image to ortho-rectify
+ False
+
+
+ OutputRaster
+ io.out
+ Output Image
+ The ortho-rectified output image
+
+
+
+ ParameterSelection
+ map
+ Output Cartographic Map Projection
+ Parameters of the output map projection to be used.
+
+
+ epsg
+
+
+ 0
+
+
+ ParameterNumber
+ map.epsg.code
+ EPSG Code
+ See www.spatialreference.org to find which EPSG code is associated to your projection
+
+
+ 4326
+
+
+ ParameterSelection
+ outputs.mode
+ Parameters estimation modes
+
+
+
+ autosize
+ autospacing
+
+
+ 0
+
+
+ ParameterNumber
+ outputs.default
+ Default pixel value
+ Default value to write when outside of input image.
+
+
+ 0
+
+
+ ParameterNumber
+ elev.default
+ Default elevation
+ This parameter allows to set the default height above ellipsoid when there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles, and no geoid file has been set. This is also used by some application as an average elevation value.
+
+
+ 0
+
+
+ ParameterSelection
+ interpolator
+ Interpolation
+ This group of parameters allows to define how the input image will be interpolated during resampling.
+
+
+ bco
+ nn
+ linear
+
+
+ 0
+
+
+ ParameterNumber
+ interpolator.bco.radius
+ Radius for bicubic interpolation
+ This parameter allows to control the size of the bicubic interpolation filter. If the target pixel size is higher than the input pixel size, increasing this parameter will reduce aliasing artefacts.
+
+
+ 2
+
+
+ ParameterNumber
+ opt.ram
+ Available RAM (Mb)
+ This allows to set the maximum amount of RAM available for processing. As the writing task is time consuming, it is better to write large pieces of data, which can be achieved by increasing this parameter (pay attention to your system capabilities)
+
+
+ 128
+
+
+ ParameterNumber
+ opt.gridspacing
+ Resampling grid spacing
+ Resampling is done according to a coordinate mapping deformation grid, whose pixel size is set by this parameter, and expressed in the coordinate system of the output image. The closer to the output spacing this parameter is, the more precise will be the ortho-rectified image, but increasing this parameter will reduce processing time.
+
+
+ 4
+
+
diff --git a/python/plugins/processing/otb/description/OrthoRectification-fit-to-ortho.xml b/python/plugins/processing/otb/description/OrthoRectification-fit-to-ortho.xml
new file mode 100644
index 000000000000..4c9cc1ed71a2
--- /dev/null
+++ b/python/plugins/processing/otb/description/OrthoRectification-fit-to-ortho.xml
@@ -0,0 +1,100 @@
+
+ OrthoRectification-fit-to-ortho
+ otbcli_OrthoRectification
+ OrthoRectification (fit-to-ortho)
+ Geometry
+ This application allows to ortho-rectify optical images from supported sensors.
+
+
+ ParameterRaster
+ io.in
+ Input Image
+ The input image to ortho-rectify
+ False
+
+
+ OutputRaster
+ io.out
+ Output Image
+ The ortho-rectified output image
+
+
+
+ ParameterSelection
+ outputs.mode
+ Parameters estimation modes
+
+
+
+ orthofit
+
+
+ 0
+
+
+ ParameterRaster
+ outputs.ortho
+ Model ortho-image
+ A model ortho-image that can be used to compute size, origin and spacing of the output
+ True
+
+
+ ParameterNumber
+ outputs.default
+ Default pixel value
+ Default value to write when outside of input image.
+
+
+ 0
+
+
+ ParameterNumber
+ elev.default
+ Default elevation
+ This parameter allows to set the default height above ellipsoid when there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles, and no geoid file has been set. This is also used by some application as an average elevation value.
+
+
+ 0
+
+
+ ParameterSelection
+ interpolator
+ Interpolation
+ This group of parameters allows to define how the input image will be interpolated during resampling.
+
+
+ bco
+ nn
+ linear
+
+
+ 0
+
+
+ ParameterNumber
+ interpolator.bco.radius
+ Radius for bicubic interpolation
+ This parameter allows to control the size of the bicubic interpolation filter. If the target pixel size is higher than the input pixel size, increasing this parameter will reduce aliasing artefacts.
+
+
+ 2
+
+
+ ParameterNumber
+ opt.ram
+ Available RAM (Mb)
+ This allows to set the maximum amount of RAM available for processing. As the writing task is time consuming, it is better to write large pieces of data, which can be achieved by increasing this parameter (pay attention to your system capabilities)
+
+
+ 128
+
+
+ ParameterNumber
+ opt.gridspacing
+ Resampling grid spacing
+ Resampling is done according to a coordinate mapping deformation grid, whose pixel size is set by this parameter, and expressed in the coordinate system of the output image. The closer to the output spacing this parameter is, the more precise will be the ortho-rectified image, but increasing this parameter will reduce processing time.
+
+
+ 4
+
+
diff --git a/python/plugins/processing/otb/description/OrthoRectification-lambert-WGS84.xml b/python/plugins/processing/otb/description/OrthoRectification-lambert-WGS84.xml
new file mode 100644
index 000000000000..8282aeeae08b
--- /dev/null
+++ b/python/plugins/processing/otb/description/OrthoRectification-lambert-WGS84.xml
@@ -0,0 +1,108 @@
+
+ OrthoRectification-lambert-WGS84
+ otbcli_OrthoRectification
+ OrthoRectification (lambert-WGS84)
+ Geometry
+ This application allows to ortho-rectify optical images from supported sensors.
+
+
+ ParameterRaster
+ io.in
+ Input Image
+ The input image to ortho-rectify
+ False
+
+
+ OutputRaster
+ io.out
+ Output Image
+ The ortho-rectified output image
+
+
+
+ ParameterSelection
+ map
+ Output Cartographic Map Projection
+ Parameters of the output map projection to be used.
+
+
+ lambert2
+ lambert93
+ wgs
+
+
+ 0
+
+
+ ParameterSelection
+ outputs.mode
+ Parameters estimation modes
+
+
+
+ autosize
+ autospacing
+
+
+ 0
+
+
+ ParameterNumber
+ outputs.default
+ Default pixel value
+ Default value to write when outside of input image.
+
+
+ 0
+
+
+ ParameterNumber
+ elev.default
+ Default elevation
+ This parameter allows to set the default height above ellipsoid when there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles, and no geoid file has been set. This is also used by some application as an average elevation value.
+
+
+ 0
+
+
+ ParameterSelection
+ interpolator
+ Interpolation
+ This group of parameters allows to define how the input image will be interpolated during resampling.
+
+
+ bco
+ nn
+ linear
+
+
+ 0
+
+
+ ParameterNumber
+ interpolator.bco.radius
+ Radius for bicubic interpolation
+ This parameter allows to control the size of the bicubic interpolation filter. If the target pixel size is higher than the input pixel size, increasing this parameter will reduce aliasing artefacts.
+
+
+ 2
+
+
+ ParameterNumber
+ opt.ram
+ Available RAM (Mb)
+ This allows to set the maximum amount of RAM available for processing. As the writing task is time consuming, it is better to write large pieces of data, which can be achieved by increasing this parameter (pay attention to your system capabilities)
+
+
+ 128
+
+
+ ParameterNumber
+ opt.gridspacing
+ Resampling grid spacing
+ Resampling is done according to a coordinate mapping deformation grid, whose pixel size is set by this parameter, and expressed in the coordinate system of the output image. The closer to the output spacing this parameter is, the more precise will be the ortho-rectified image, but increasing this parameter will reduce processing time.
+
+
+ 4
+
+
diff --git a/python/plugins/processing/otb/description/OrthoRectification-utm.xml b/python/plugins/processing/otb/description/OrthoRectification-utm.xml
new file mode 100644
index 000000000000..f0dafe09515e
--- /dev/null
+++ b/python/plugins/processing/otb/description/OrthoRectification-utm.xml
@@ -0,0 +1,122 @@
+
+ OrthoRectification-utm
+ otbcli_OrthoRectification
+ OrthoRectification (utm)
+ Geometry
+ This application allows to ortho-rectify optical images from supported sensors.
+
+
+ ParameterRaster
+ io.in
+ Input Image
+ The input image to ortho-rectify
+ False
+
+
+ OutputRaster
+ io.out
+ Output Image
+ The ortho-rectified output image
+
+
+
+ ParameterSelection
+ map
+ Output Cartographic Map Projection
+ Parameters of the output map projection to be used.
+
+
+ utm
+
+
+ 0
+
+
+ ParameterNumber
+ map.utm.zone
+ Zone number
+ The zone number ranges from 1 to 60 and allows to define the transverse mercator projection (along with the hemisphere)
+
+
+ 31
+
+
+ ParameterBoolean
+ map.utm.northhem
+ Northern Hemisphere
+ The transverse mercator projections are defined by their zone number as well as the hemisphere. Activate this parameter if your image is in the northern hemisphere.
+ True
+
+
+ ParameterSelection
+ outputs.mode
+ Parameters estimation modes
+
+
+
+ autosize
+ autospacing
+
+
+ 0
+
+
+ ParameterNumber
+ outputs.default
+ Default pixel value
+ Default value to write when outside of input image.
+
+
+ 0
+
+
+ ParameterNumber
+ elev.default
+ Default elevation
+ This parameter allows to set the default height above ellipsoid when there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles, and no geoid file has been set. This is also used by some application as an average elevation value.
+
+
+ 0
+
+
+ ParameterSelection
+ interpolator
+ Interpolation
+ This group of parameters allows to define how the input image will be interpolated during resampling.
+
+
+ bco
+ nn
+ linear
+
+
+ 0
+
+
+ ParameterNumber
+ interpolator.bco.radius
+ Radius for bicubic interpolation
+ This parameter allows to control the size of the bicubic interpolation filter. If the target pixel size is higher than the input pixel size, increasing this parameter will reduce aliasing artefacts.
+
+
+ 2
+
+
+ ParameterNumber
+ opt.ram
+ Available RAM (Mb)
+ This allows to set the maximum amount of RAM available for processing. As the writing task is time consuming, it is better to write large pieces of data, which can be achieved by increasing this parameter (pay attention to your system capabilities)
+
+
+ 128
+
+
+ ParameterNumber
+ opt.gridspacing
+ Resampling grid spacing
+ Resampling is done according to a coordinate mapping deformation grid, whose pixel size is set by this parameter, and expressed in the coordinate system of the output image. The closer to the output spacing this parameter is, the more precise will be the ortho-rectified image, but increasing this parameter will reduce processing time.
+
+
+ 4
+
+
diff --git a/python/plugins/processing/otb/description/OrthoRectification.txt b/python/plugins/processing/otb/description/OrthoRectification.txt
deleted file mode 100644
index 81c40ecb6cb1..000000000000
--- a/python/plugins/processing/otb/description/OrthoRectification.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-OrthoRectification
-otbcli_OrthoRectification
-Ortho-rectification
-Geometry
-ParameterRaster|-io.in|Input Image|False
-OutputRaster|-io.out|Output Image
-ParameterSelection|-map|Output Cartographic Map Projection|utm;lambert2;lambert93;wgs;epsg|4
-ParameterNumber|-map.utm.zone|Zone number|None|None|31
-ParameterBoolean|-map.utm.northhem|Northern Hemisphere|
-ParameterNumber|-map.epsg.code|EPSG Code|None|None|32631
-ParameterSelection|-outputs.mode|Parameters estimation modes|auto;autosize;autospacing|0
-ParameterNumber|-outputs.ulx|Upper Left X|None|None|0.0
-ParameterNumber|-outputs.uly|Upper Left Y|None|None|0.0
-ParameterNumber|-outputs.sizex|Size X|None|None|0
-ParameterNumber|-outputs.sizey|Size Y|None|None|0
-ParameterNumber|-outputs.spacingx|Pixel Size X|None|None|0.0
-ParameterNumber|-outputs.spacingy|Pixel Size Y|None|None|0.0
-ParameterBoolean|-outputs.isotropic|Force isotropic spacing by default|True
-ParameterNumber|-outputs.default|Default pixel value|None|None|0.0
-ParameterSelection|-elev|Elevation management|dem;average|1
-ParameterString|-elev.dem|DEM directory|
-ParameterFile|-elev.geoid|Geoid File|
-ParameterNumber|-elev.default|Average Elevation|None|None|0.0
-ParameterSelection|-interpolator|Interpolation|nn;linear;bco|0
-ParameterNumber|-interpolator.bco.radius|Radius for bicubic interpolation|None|None|2
-ParameterNumber|-opt.rpc|RPC modeling (points per axis)|None|None|10
-ParameterNumber|-opt.ram|Available RAM (Mb)|None|None|128
-ParameterNumber|-opt.gridspacing|Resampling grid spacing|None|None|4.0
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/Pansharpening-bayes.xml b/python/plugins/processing/otb/description/Pansharpening-bayes.xml
new file mode 100644
index 000000000000..50c390f607bc
--- /dev/null
+++ b/python/plugins/processing/otb/description/Pansharpening-bayes.xml
@@ -0,0 +1,67 @@
+
+ Pansharpening-bayes
+ otbcli_Pansharpening
+ Pansharpening (bayes)
+ Geometry
+ Perform P+XS pansharpening
+
+ ParameterRaster
+ inp
+ Input PAN Image
+ Input panchromatic image.
+ False
+
+
+ ParameterRaster
+ inxs
+ Input XS Image
+ Input XS image.
+ False
+
+
+ OutputRaster
+ out
+ Output image
+ Output image.
+
+
+
+ ParameterSelection
+ method
+ Algorithm
+ Selection of the pan-sharpening method.
+
+
+ bayes
+
+
+ 0
+
+
+ ParameterNumber
+ method.bayes.lambda
+ Weight
+ Set the weighting value.
+
+
+ 0.9999
+
+
+ ParameterNumber
+ method.bayes.s
+ S coefficient
+ Set the S coefficient.
+
+
+ 1
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
diff --git a/python/plugins/processing/otb/description/Pansharpening-lmvm.xml b/python/plugins/processing/otb/description/Pansharpening-lmvm.xml
new file mode 100644
index 000000000000..821e2928a183
--- /dev/null
+++ b/python/plugins/processing/otb/description/Pansharpening-lmvm.xml
@@ -0,0 +1,67 @@
+
+ Pansharpening-lmvm
+ otbcli_Pansharpening
+ Pansharpening (lmvm)
+ Geometry
+ Perform P+XS pansharpening
+
+ ParameterRaster
+ inp
+ Input PAN Image
+ Input panchromatic image.
+ False
+
+
+ ParameterRaster
+ inxs
+ Input XS Image
+ Input XS image.
+ False
+
+
+ OutputRaster
+ out
+ Output image
+ Output image.
+
+
+
+ ParameterSelection
+ method
+ Algorithm
+ Selection of the pan-sharpening method.
+
+
+ lmvm
+
+
+ 0
+
+
+ ParameterNumber
+ method.lmvm.radiusx
+ X radius
+ Set the x radius of the sliding window.
+
+
+ 3
+
+
+ ParameterNumber
+ method.lmvm.radiusy
+ Y radius
+ Set the y radius of the sliding window.
+
+
+ 3
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
diff --git a/python/plugins/processing/otb/description/Pansharpening-rcs.xml b/python/plugins/processing/otb/description/Pansharpening-rcs.xml
new file mode 100644
index 000000000000..c37f6f900ff8
--- /dev/null
+++ b/python/plugins/processing/otb/description/Pansharpening-rcs.xml
@@ -0,0 +1,49 @@
+
+ Pansharpening-rcs
+ otbcli_Pansharpening
+ Pansharpening (rcs)
+ Geometry
+ Perform P+XS pansharpening
+
+ ParameterRaster
+ inp
+ Input PAN Image
+ Input panchromatic image.
+ False
+
+
+ ParameterRaster
+ inxs
+ Input XS Image
+ Input XS image.
+ False
+
+
+ OutputRaster
+ out
+ Output image
+ Output image.
+
+
+
+ ParameterSelection
+ method
+ Algorithm
+ Selection of the pan-sharpening method.
+
+
+ rcs
+
+
+ 0
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
diff --git a/python/plugins/processing/otb/description/Pansharpening.txt b/python/plugins/processing/otb/description/Pansharpening.txt
deleted file mode 100644
index 64adffa00ce7..000000000000
--- a/python/plugins/processing/otb/description/Pansharpening.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Pansharpening
-otbcli_Pansharpening
-Pansharpening
-Image Manipulation
-ParameterRaster|-inp|Input PAN Image|False
-ParameterRaster|-inxs|Input XS Image|False
-OutputRaster|-out|Output Image
-ParameterSelection|-method|Algorithm|rcs;lmvm;bayes|0
-ParameterNumber|-method.lmvm.radiusx|X radius|None|None|3
-ParameterNumber|-method.lmvm.radiusy|Y radius|None|None|3
-ParameterNumber|-method.bayes.s|S coefficient|None|None|1
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
diff --git a/python/plugins/processing/otb/description/PixelValue.txt b/python/plugins/processing/otb/description/PixelValue.txt
deleted file mode 100644
index d37d7069bc4a..000000000000
--- a/python/plugins/processing/otb/description/PixelValue.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-PixelValue
-otbcli_PixelValue
-Pixel Value
-Miscellaneous
-ParameterRaster|-in|Input Image|False
-ParameterNumber|-coordx|Col index|None|None|0
-ParameterNumber|-coordy|Line index|None|None|0
-ParameterString|-value|Pixel Value|
diff --git a/python/plugins/processing/otb/description/PixelWiseBlockMatching.txt b/python/plugins/processing/otb/description/PixelWiseBlockMatching.txt
deleted file mode 100644
index 220eebbbac1d..000000000000
--- a/python/plugins/processing/otb/description/PixelWiseBlockMatching.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-PixelWiseBlockMatching
-otbcli_PixelWiseBlockMatching
- Pixel-wise Block-Matching
-Stereo
-ParameterRaster|-io.inleft|Left input image|False
-ParameterRaster|-io.inright|Right input image|False
-OutputRaster|-io.out|The output disparity map
-OutputRaster|-io.outmaskleft|The left output mask corresponding to all criterions
-OutputRaster|-io.outmaskright|The right output mask corresponding to all criterions
-ParameterBoolean|-io.outmetric|Output optimal metric values as well|
-ParameterRaster|-mask.inleft|Discard left pixels from mask image|True
-ParameterRaster|-mask.inright|Discard right pixels from mask image|True
-ParameterNumber|-mask.nodata|Discard pixels with no-data value|None|None|0.0
-ParameterNumber|-mask.variancet|Discard pixels with low local variance|None|None|100.0
-ParameterSelection|-bm.metric|Block-matching metric|ssd;ncc;lp|0
-ParameterNumber|-bm.metric.lp.p|p value|None|None|1.0
-ParameterNumber|-bm.radius|Radius of blocks|None|None|3
-ParameterNumber|-bm.minhd|Minimum horizontal disparity|None|None|0
-ParameterNumber|-bm.maxhd|Maximum horizontal disparity|None|None|0
-ParameterNumber|-bm.minvd|Minimum vertical disparity|None|None|0
-ParameterNumber|-bm.maxvd|Maximum vertical disparity|None|None|0
-ParameterSelection|-bm.subpixel|Sub-pixel interpolation|none;parabolic;triangular;dichotomy|0
-ParameterNumber|-bm.medianfilter.radius|Radius|None|None|0
-ParameterNumber|-bm.medianfilter.incoherence|Incoherence threshold|None|None|0.0
-ParameterSelection|-bm.initdisp|Initial disparities|none;uniform;maps|0
-ParameterNumber|-bm.initdisp.uniform.hdisp|Horizontal initial disparity|None|None|0
-ParameterNumber|-bm.initdisp.uniform.vdisp|Vertical initial disparity|None|None|0
-ParameterNumber|-bm.initdisp.uniform.hrad|Horizontal exploration radius|None|None|0
-ParameterNumber|-bm.initdisp.uniform.vrad|Vertical exploration radius|None|None|0
-ParameterRaster|-bm.initdisp.maps.hmap|Horizontal initial disparity map|False
-ParameterRaster|-bm.initdisp.maps.vmap|Vertical initial disparity map|False
-ParameterNumber|-bm.initdisp.maps.hrad|Horizontal exploration radius|None|None|0
-ParameterNumber|-bm.initdisp.maps.vrad|Vertical exploration radius|None|None|0
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
diff --git a/python/plugins/processing/otb/description/Quicklook.txt b/python/plugins/processing/otb/description/Quicklook.txt
deleted file mode 100644
index 4416b0adbfd9..000000000000
--- a/python/plugins/processing/otb/description/Quicklook.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-Quicklook
-otbcli_Quicklook
-Quick Look
-Image Manipulation
-ParameterRaster|-in|Input Image|False
-OutputRaster|-out|Output Image
-ParameterNumber|-rox|ROI Origin X|None|None|0
-ParameterNumber|-roy|ROI Origin Y|None|None|0
-ParameterNumber|-rsx|ROI Size X|None|None|0
-ParameterNumber|-rsy|ROI Size Y|None|None|0
-ParameterNumber|-sr|Sampling ratio|None|None|2
-ParameterNumber|-sx|Size X|None|None|0
-ParameterNumber|-sy|Size Y|None|None|0
diff --git a/python/plugins/processing/otb/description/RadiometricIndices.txt b/python/plugins/processing/otb/description/RadiometricIndices.txt
deleted file mode 100644
index d2d588b4d193..000000000000
--- a/python/plugins/processing/otb/description/RadiometricIndices.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-Radiometric Indices
-otbcli_RadiometricIndices
-Radiometric Indices
-Feature Extraction
-ParameterRaster|-in|Input Image|False
-OutputRaster|-out|Output Image
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
-ParameterNumber|-channels.blue|Blue Channel|None|None|1
-ParameterNumber|-channels.green|Green Channel|None|None|2
-ParameterNumber|-channels.red|Red Channel|None|None|3
-ParameterNumber|-channels.nir|NIR Channel|None|None|4
-ParameterNumber|-channels.mir|Mir Channel|None|None|5
-ParameterSelection|-list|Available Radiometric Indices|Vegetation:NDVI;Vegetation:TNDVI;Vegetation:RVI;Vegetation:SAVI;Vegetation:TSAVI;Vegetation:MSAVI;Vegetation:MSAVI2;Vegetation:GEMI;Vegetation:IPVI;Vegetation:LAIFromNDVILog;Vegetation:LAIFromReflLinear;Vegetation:LAIFromNDVIFormo;Water:NDWI;Water:NDWI2;Water:MNDWI;Water:NDPI;Water:NDTI;Soil:RI;Soil:CI;Soil:BI;Soil:BI2
diff --git a/python/plugins/processing/otb/description/RadiometricIndices.xml b/python/plugins/processing/otb/description/RadiometricIndices.xml
new file mode 100644
index 000000000000..504495fac13c
--- /dev/null
+++ b/python/plugins/processing/otb/description/RadiometricIndices.xml
@@ -0,0 +1,124 @@
+
+ RadiometricIndices
+ otbcli_RadiometricIndices
+ Radiometric Indices
+ Feature Extraction
+ Compute radiometric indices.
+
+ ParameterRaster
+ in
+ Input Image
+ Input image
+ False
+
+
+ OutputRaster
+ out
+ Output Image
+ Radiometric indices output image
+
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterNumber
+ channels.blue
+ Blue Channel
+ Blue channel index
+
+
+ 1
+
+
+ ParameterNumber
+ channels.green
+ Green Channel
+ Green channel index
+
+
+ 1
+
+
+ ParameterNumber
+ channels.red
+ Red Channel
+ Red channel index
+
+
+ 1
+
+
+ ParameterNumber
+ channels.nir
+ NIR Channel
+ NIR channel index
+
+
+ 1
+
+
+ ParameterNumber
+ channels.mir
+ Mir Channel
+ Mir channel index
+
+
+ 1
+
+
+ ParameterSelection
+ list
+ Available Radiometric Indices
+ List of available radiometric indices with their relevant channels in brackets:
+ Vegetation:NDVI - Normalized difference vegetation index (Red, NIR)
+ Vegetation:TNDVI - Transformed normalized difference vegetation index (Red, NIR)
+ Vegetation:RVI - Ratio vegetation index (Red, NIR)
+ Vegetation:SAVI - Soil adjusted vegetation index (Red, NIR)
+ Vegetation:TSAVI - Transformed soil adjusted vegetation index (Red, NIR)
+ Vegetation:MSAVI - Modified soil adjusted vegetation index (Red, NIR)
+ Vegetation:MSAVI2 - Modified soil adjusted vegetation index 2 (Red, NIR)
+ Vegetation:GEMI - Global environment monitoring index (Red, NIR)
+ Vegetation:IPVI - Infrared percentage vegetation index (Red, NIR)
+
+ Water:NDWI - Normalized difference water index (Gao 1996) (NIR, MIR)
+ Water:NDWI2 - Normalized difference water index (Mc Feeters 1996) (Green, NIR)
+ Water:MNDWI - Modified normalized difference water index (Xu 2006) (Green, MIR)
+ Water:NDPI - Normalized difference pond index (Lacaux et al.) (MIR, Green)
+ Water:NDTI - Normalized difference turbidity index (Lacaux et al.) (Red, Green)
+
+ Soil:RI - Redness index (Red, Green)
+ Soil:CI - Color index (Red, Green)
+ Soil:BI - Brightness index (Red, Green)
+ Soil:BI2 - Brightness index 2 (NIR, Red, Green)
+
+
+ ndvi
+ tndvi
+ rvi
+ savi
+ tsavi
+ msavi
+ msavi2
+ gemi
+ ipvi
+ ndwi
+ ndwi2
+ mndwi
+ ndpi
+ ndti
+ ri
+ ci
+ bi
+ bi2
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/Rasterization.txt b/python/plugins/processing/otb/description/Rasterization.txt
deleted file mode 100644
index 8404dde39404..000000000000
--- a/python/plugins/processing/otb/description/Rasterization.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Rasterization
-otbcli_Rasterization
-Rasterization
-Vector Data Manipulation
-ParameterFile|-in|Input vector dataset||
-OutputRaster|-out|Ouptut image
-ParameterRaster|-im|Input reference image|True
-ParameterNumber|-szx|Output size x|None|None|0
-ParameterNumber|-szy|Output size y|None|None|0
-ParameterNumber|-epsg|Output EPSG code|None|None|0
-ParameterNumber|-orx|Output Upper-left x|None|None|0.0
-ParameterNumber|-ory|Output Upper-left y|None|None|0.0
-ParameterNumber|-spx|Spacing (GSD) x|None|None|0.0
-ParameterNumber|-spy|Spacing (GSD) y|None|None|0.0
-ParameterNumber|-background|Background value|None|None|0.0
-ParameterSelection|-mode|Rasterization mode|binary;attribute|0
-ParameterNumber|-mode.binary.foreground|Foreground value|None|None|255.0
-ParameterString|-mode.attribute.field|The attribute field to burn|DN
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
diff --git a/python/plugins/processing/otb/description/ReadImageInfo.txt b/python/plugins/processing/otb/description/ReadImageInfo.txt
deleted file mode 100644
index 7d5e2aa43f0f..000000000000
--- a/python/plugins/processing/otb/description/ReadImageInfo.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-ReadImageInfo
-otbcli_ReadImageInfo
-Read image information
-Image Manipulation
-ParameterRaster|-in|Input Image|False
-ParameterBoolean|-keywordlist|Display the OSSIM keywordlist|
-ParameterNumber|-sizex|Size X|None|None|0
-ParameterNumber|-sizey|Size Y|None|None|0
-ParameterNumber|-spacingx|Pixel Size X|None|None|0.0
-ParameterNumber|-spacingy|Pixel Size Y|None|None|0.0
-ParameterNumber|-originx|Image Origin X|None|None|0.0
-ParameterNumber|-originy|Image Origin Y|None|None|0.0
-ParameterNumber|-estimatedgroundspacingx|Estimated ground spacing X|None|None|0.0
-ParameterNumber|-estimatedgroundspacingy|Estimated ground spacing Y|None|None|0.0
-ParameterNumber|-numberbands|Number Of Bands|None|None|0
-ParameterString|-sensor|Sensor id|
-ParameterString|-id|Image id|
-ParameterString|-time|Acquisition time|
-ParameterNumber|-ullat|Upper left lattitude|None|None|0.0
-ParameterNumber|-ullon|Upper left longitude|None|None|0.0
-ParameterNumber|-urlat|Upper right lattitude|None|None|0.0
-ParameterNumber|-urlon|Upper right longitude|None|None|0.0
-ParameterNumber|-lrlat|Lower right lattitude|None|None|0.0
-ParameterNumber|-lrlon|Lower right longitude|None|None|0.0
-ParameterNumber|-lllat|Lower left lattitude|None|None|0.0
-ParameterNumber|-lllon|Lower left longitude|None|None|0.0
-ParameterString|-town|Nearest town|
-ParameterString|-country|Country|
-ParameterNumber|-rgb.r|Red Band|None|None|1
-ParameterNumber|-rgb.g|Green Band|None|None|2
-ParameterNumber|-rgb.b|Blue Band|None|None|3
-ParameterString|-projectionref|Projection|
-ParameterString|-keyword|Keywordlist|
-ParameterNumber|-gcp.count|GCPs Number|None|None|0
-ParameterString|-gcp.proj|GCP Projection|
diff --git a/python/plugins/processing/otb/description/ReadImageInfo.xml b/python/plugins/processing/otb/description/ReadImageInfo.xml
new file mode 100644
index 000000000000..0e96948f43bc
--- /dev/null
+++ b/python/plugins/processing/otb/description/ReadImageInfo.xml
@@ -0,0 +1,57 @@
+
+ ReadImageInfo
+ otbcli_ReadImageInfo
+ Read image information
+ Image Manipulation
+ Get information about the image
+
+ ParameterRaster
+ in
+ Input Image
+ Input image to analyse
+ False
+
+
+ ParameterBoolean
+ keywordlist
+ Display the OSSIM keywordlist
+ Output the OSSIM keyword list. It contains metadata information (sensor model, geometry). Information is stored as a keyword list (pairs of key/value)
+ True
+
+
+ ParameterString
+ gcp.ids
+ GCPs Id
+ GCPs identifier
+
+
+ False
+
+
+ ParameterString
+ gcp.info
+ GCPs Info
+ GCPs Information
+
+
+ False
+
+
+ ParameterString
+ gcp.imcoord
+ GCPs Image Coordinates
+ GCPs Image coordinates
+
+
+ False
+
+
+ ParameterString
+ gcp.geocoord
+ GCPs Geographic Coordinates
+ GCPs Geographic Coordinates
+
+
+ False
+
+
diff --git a/python/plugins/processing/otb/description/Rescale.txt b/python/plugins/processing/otb/description/Rescale.txt
deleted file mode 100644
index dcdefdfaade4..000000000000
--- a/python/plugins/processing/otb/description/Rescale.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-Rescale
-otbcli_Rescale
-Rescale Image
-Image Manipulation
-ParameterRaster|-in|Input Image|False
-OutputRaster|-out|Output Image
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
-ParameterNumber|-outmin|Output min value|None|None|0.0
-ParameterNumber|-outmax|Output max value|None|None|255.0
diff --git a/python/plugins/processing/otb/description/Rescale.xml b/python/plugins/processing/otb/description/Rescale.xml
new file mode 100644
index 000000000000..af6d67ffbf6f
--- /dev/null
+++ b/python/plugins/processing/otb/description/Rescale.xml
@@ -0,0 +1,48 @@
+
+ Rescale
+ otbcli_Rescale
+ Rescale Image
+ Image Manipulation
+ Rescale the image between two given values.
+
+ ParameterRaster
+ in
+ Input Image
+ The image to scale.
+ False
+
+
+ OutputRaster
+ out
+ Output Image
+ The rescaled image filename.
+
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterNumber
+ outmin
+ Output min value
+ Minimum value of the output image.
+
+
+ 0
+
+
+ ParameterNumber
+ outmax
+ Output max value
+ Maximum value of the output image.
+
+
+ 255
+
+
diff --git a/python/plugins/processing/otb/description/RigidTransformResample-id.xml b/python/plugins/processing/otb/description/RigidTransformResample-id.xml
new file mode 100644
index 000000000000..8ff7125e59f1
--- /dev/null
+++ b/python/plugins/processing/otb/description/RigidTransformResample-id.xml
@@ -0,0 +1,83 @@
+
+ RigidTransformResample-id
+ otbcli_RigidTransformResample
+ RigidTransformResample (id)
+ Geometry
+ Resample an image with a rigid transform
+
+ ParameterRaster
+ in
+ Input image
+ The input image to translate.
+ False
+
+
+ OutputRaster
+ out
+ Output image
+ The transformed output image.
+
+
+
+ ParameterSelection
+ transform.type
+ Type of transformation
+ Type of transformation. Available transformations are spatial scaling, translation and rotation with scaling factor
+
+
+ id
+
+
+ 0
+
+
+ ParameterNumber
+ transform.type.id.scalex
+ X scaling
+ Scaling factor between the output X spacing and the input X spacing
+
+
+ 1
+
+
+ ParameterNumber
+ transform.type.id.scaley
+ Y scaling
+ Scaling factor between the output Y spacing and the input Y spacing
+
+
+ 1
+
+
+ ParameterSelection
+ interpolator
+ Interpolation
+ This group of parameters allows to define how the input image will be interpolated during resampling.
+
+
+ nn
+ linear
+ bco
+
+
+ 2
+
+
+ ParameterNumber
+ interpolator.bco.radius
+ Radius for bicubic interpolation
+ This parameter allows to control the size of the bicubic interpolation filter. If the target pixel size is higher than the input pixel size, increasing this parameter will reduce aliasing artefacts.
+
+
+ 2
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ This allows to set the maximum amount of RAM available for processing. As the writing task is time consuming, it is better to write large pieces of data, which can be achieved by increasing this parameter (pay attention to your system capabilities)
+
+
+ 128
+
+
diff --git a/python/plugins/processing/otb/description/RigidTransformResample-rotation.xml b/python/plugins/processing/otb/description/RigidTransformResample-rotation.xml
new file mode 100644
index 000000000000..80d7333fc4ff
--- /dev/null
+++ b/python/plugins/processing/otb/description/RigidTransformResample-rotation.xml
@@ -0,0 +1,92 @@
+
+ RigidTransformResample-rotation
+ otbcli_RigidTransformResample
+ RigidTransformResample (rotation)
+ Geometry
+ Resample an image with a rigid transform
+
+ ParameterRaster
+ in
+ Input image
+ The input image to translate.
+ False
+
+
+ OutputRaster
+ out
+ Output image
+ The transformed output image.
+
+
+
+ ParameterSelection
+ transform.type
+ Type of transformation
+ Type of transformation. Available transformations are spatial scaling, translation and rotation with scaling factor
+
+
+ rotation
+
+
+ 0
+
+
+ ParameterNumber
+ transform.type.rotation.angle
+ Rotation angle
+ The rotation angle in degree (values between -180 and 180)
+
+
+ 0
+
+
+ ParameterNumber
+ transform.type.rotation.scalex
+ X scaling
+ Scale factor between the X spacing of the rotated output image and the X spacing of the unrotated image
+
+
+ 1
+
+
+ ParameterNumber
+ transform.type.rotation.scaley
+ Y scaling
+ Scale factor between the Y spacing of the rotated output image and the Y spacing of the unrotated image
+
+
+ 1
+
+
+ ParameterSelection
+ interpolator
+ Interpolation
+ This group of parameters allows to define how the input image will be interpolated during resampling.
+
+
+ nn
+ linear
+ bco
+
+
+ 2
+
+
+ ParameterNumber
+ interpolator.bco.radius
+ Radius for bicubic interpolation
+ This parameter allows to control the size of the bicubic interpolation filter. If the target pixel size is higher than the input pixel size, increasing this parameter will reduce aliasing artefacts.
+
+
+ 2
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ This allows to set the maximum amount of RAM available for processing. As the writing task is time consuming, it is better to write large pieces of data, which can be achieved by increasing this parameter (pay attention to your system capabilities)
+
+
+ 128
+
+
diff --git a/python/plugins/processing/otb/description/RigidTransformResample-translation.xml b/python/plugins/processing/otb/description/RigidTransformResample-translation.xml
new file mode 100644
index 000000000000..031ef35889a3
--- /dev/null
+++ b/python/plugins/processing/otb/description/RigidTransformResample-translation.xml
@@ -0,0 +1,101 @@
+
+ RigidTransformResample-translation
+ otbcli_RigidTransformResample
+ RigidTransformResample (translation)
+ Geometry
+ Resample an image with a rigid transform
+
+ ParameterRaster
+ in
+ Input image
+ The input image to translate.
+ False
+
+
+ OutputRaster
+ out
+ Output image
+ The transformed output image.
+
+
+
+ ParameterSelection
+ transform.type
+ Type of transformation
+ Type of transformation. Available transformations are spatial scaling, translation and rotation with scaling factor
+
+
+ translation
+
+
+ 0
+
+
+ ParameterNumber
+ transform.type.translation.tx
+ The X translation (in physical units)
+ The translation value along X axis (in physical units).
+
+
+ 0
+
+
+ ParameterNumber
+ transform.type.translation.ty
+ The Y translation (in physical units)
+ The translation value along Y axis (in physical units)
+
+
+ 0
+
+
+ ParameterNumber
+ transform.type.translation.scalex
+ X scaling
+ Scaling factor between the output X spacing and the input X spacing
+
+
+ 1
+
+
+ ParameterNumber
+ transform.type.translation.scaley
+ Y scaling
+ Scaling factor between the output Y spacing and the input Y spacing
+
+
+ 1
+
+
+ ParameterSelection
+ interpolator
+ Interpolation
+ This group of parameters allows to define how the input image will be interpolated during resampling.
+
+
+ nn
+ linear
+ bco
+
+
+ 2
+
+
+ ParameterNumber
+ interpolator.bco.radius
+ Radius for bicubic interpolation
+ This parameter allows to control the size of the bicubic interpolation filter. If the target pixel size is higher than the input pixel size, increasing this parameter will reduce aliasing artefacts.
+
+
+ 2
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ This allows to set the maximum amount of RAM available for processing. As the writing task is time consuming, it is better to write large pieces of data, which can be achieved by increasing this parameter (pay attention to your system capabilities)
+
+
+ 128
+
+
diff --git a/python/plugins/processing/otb/description/RigidTransformResample.txt b/python/plugins/processing/otb/description/RigidTransformResample.txt
deleted file mode 100644
index 9bc8a6fa5ea9..000000000000
--- a/python/plugins/processing/otb/description/RigidTransformResample.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-RigidTransformResample
-otbcli_RigidTransformResample
-Image resampling with a rigid transform
-Geometry
-ParameterRaster|-in|Input image|False
-OutputRaster|-out|Output image
-ParameterSelection|-transform.type|Type of transformation|id;translation;rotation|0
-ParameterNumber|-transform.type.id.scalex|X scaling|None|None|1.0
-ParameterNumber|-transform.type.id.scaley|Y scaling|None|None|1.0
-ParameterNumber|-transform.type.translation.tx|The X translation (in physical units)|None|None|0.0
-ParameterNumber|-transform.type.translation.ty|The Y translation (in physical units)|None|None|0.0
-ParameterNumber|-transform.type.rotation.angle|Rotation angle|None|None|0.0
-ParameterNumber|-transform.type.rotation.scalex|X scaling|None|None|1.0
-ParameterNumber|-transform.type.rotation.scaley|Y scaling|None|None|1.0
-ParameterSelection|-interpolator|Interpolation|nn;linear;bco|2
-ParameterNumber|-interpolator.bco.radius|Radius for bicubic interpolation|None|None|2
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
diff --git a/python/plugins/processing/otb/description/SFSTextureExtraction.txt b/python/plugins/processing/otb/description/SFSTextureExtraction.txt
deleted file mode 100644
index 96f905644dc6..000000000000
--- a/python/plugins/processing/otb/description/SFSTextureExtraction.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-SFS Texture Extraction
-otbcli_SFSTextureExtraction
-SFS Texture Extraction
-Feature Extraction
-ParameterRaster|-in|Input Image|False
-OutputRaster|-out|Output Image
-ParameterNumber|-channel|Selected Channel|None|None|1
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
-ParameterNumber|-parameters.spethre|Spectral Threshold|None|None|50
-ParameterNumber|-parameters.spathre|Spatial Threshold|None|None|100
-ParameterNumber|-parameters.nbdir|Number of Direction|None|None|20
-ParameterNumber|-parameters.alpha|Alpha|None|None|1
-ParameterNumber|-parameters.maxcons|Ratio Maximum Consideration Number |None|None|5
-
diff --git a/python/plugins/processing/otb/description/SOMClassification.txt b/python/plugins/processing/otb/description/SOMClassification.txt
deleted file mode 100644
index 4c00844ec04b..000000000000
--- a/python/plugins/processing/otb/description/SOMClassification.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-SOMClassification
-otbcli_SOMClassification
-SOM Classification
-Learning
-ParameterRaster|-in|InputImage|False
-OutputRaster|-out|OutputImage
-ParameterRaster|-vm|ValidityMask|True
-ParameterNumber|-tp|TrainingProbability|None|None|1.0
-ParameterNumber|-ts|TrainingSetSize|None|None|0
-ParameterNumber|-sl|StreamingLines|None|None|0
-OutputRaster|-som|SOM Map
-ParameterNumber|-sx|SizeX|None|None|32
-ParameterNumber|-sy|SizeY|None|None|32
-ParameterNumber|-nx|NeighborhoodX|None|None|10
-ParameterNumber|-ny|NeighborhoodY|None|None|10
-ParameterNumber|-ni|NumberIteration|None|None|5
-ParameterNumber|-bi|BetaInit|None|None|1.0
-ParameterNumber|-bf|BetaFinal|None|None|0.10000000149
-ParameterNumber|-iv|InitialValue|None|None|0.0
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
-ParameterNumber|-rand|set user defined seed|None|None|0
diff --git a/python/plugins/processing/otb/description/SOMClassification.xml b/python/plugins/processing/otb/description/SOMClassification.xml
new file mode 100644
index 000000000000..fd13d30e4363
--- /dev/null
+++ b/python/plugins/processing/otb/description/SOMClassification.xml
@@ -0,0 +1,152 @@
+
+ SOMClassification
+ otbcli_SOMClassification
+ SOM Classification
+ Learning
+ SOM image classification.
+
+ ParameterRaster
+ in
+ InputImage
+ Input image to classify.
+ False
+
+
+ OutputRaster
+ out
+ OutputImage
+ Output classified image (each pixel contains the index of its corresponding vector in the SOM).
+
+
+
+ ParameterRaster
+ vm
+ ValidityMask
+ Validity mask (only pixels corresponding to a mask value greater than 0 will be used for learning)
+ True
+
+
+ ParameterNumber
+ tp
+ TrainingProbability
+ Probability for a sample to be selected in the training set
+
+
+ 1
+
+
+ ParameterNumber
+ ts
+ TrainingSetSize
+ Maximum training set size (in pixels)
+
+
+ 0
+
+
+ ParameterNumber
+ sl
+ StreamingLines
+ Number of lines in each streaming block (used during data sampling)
+
+
+ 0
+
+
+ OutputRaster
+ som
+ SOM Map
+ Output image containing the Self-Organizing Map
+
+
+
+ ParameterNumber
+ sx
+ SizeX
+ X size of the SOM map
+
+
+ 32
+
+
+ ParameterNumber
+ sy
+ SizeY
+ Y size of the SOM map
+
+
+ 32
+
+
+ ParameterNumber
+ nx
+ NeighborhoodX
+ X size of the initial neighborhood in the SOM map
+
+
+ 10
+
+
+ ParameterNumber
+ ny
+ NeighborhoodY
+ Y size of the initial neighborhood in the SOM map
+
+
+ 10
+
+
+ ParameterNumber
+ ni
+ NumberIteration
+ Number of iterations for SOM learning
+
+
+ 5
+
+
+ ParameterNumber
+ bi
+ BetaInit
+ Initial learning coefficient
+
+
+ 1
+
+
+ ParameterNumber
+ bf
+ BetaFinal
+ Final learning coefficient
+
+
+ 0.1
+
+
+ ParameterNumber
+ iv
+ InitialValue
+ Maximum initial neuron weight
+
+
+ 0
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterNumber
+ rand
+ set user defined seed
+ Set a specific seed with an integer value.
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/SarRadiometricCalibration.txt b/python/plugins/processing/otb/description/SarRadiometricCalibration.txt
deleted file mode 100644
index d059f06228d4..000000000000
--- a/python/plugins/processing/otb/description/SarRadiometricCalibration.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-SarRadiometricCalibration
-otbcli_SarRadiometricCalibration
-SAR Radiometric calibration
-Calibration
-ParameterRaster|-in|Input Complex Image|False
-OutputRaster|-out|Output Image
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
-ParameterBoolean|-noise|Disable Noise|
diff --git a/python/plugins/processing/otb/description/Segmentation-cc.xml b/python/plugins/processing/otb/description/Segmentation-cc.xml
new file mode 100644
index 000000000000..8fc792fabcc8
--- /dev/null
+++ b/python/plugins/processing/otb/description/Segmentation-cc.xml
@@ -0,0 +1,153 @@
+
+ Segmentation-cc
+ otbcli_Segmentation
+ Segmentation (cc)
+ Segmentation
+ Performs segmentation of an image, and output either a raster or a vector file. In vector mode, large input datasets are supported.
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to segment
+ False
+
+
+ ParameterSelection
+ filter
+ Segmentation algorithm
+ Choice of segmentation algorithm (mean-shift by default)
+
+
+ cc
+
+
+ 0
+
+
+ ParameterString
+ filter.cc.expr
+ Condition
+ User defined connection condition, written as a mathematical expression. Available variables are p(i)b(i), intensity_p(i) and distance (example of expression : distance < 10 )
+
+
+ False
+
+
+ ParameterSelection
+ mode
+ Processing mode
+ Choice of processing mode, either raster or large-scale.
+
+
+ vector
+
+
+ 0
+
+
+ OutputVector
+ mode.vector.out
+ Output vector file
+ The output vector file or database (name can be anything understood by OGR)
+
+
+
+ ParameterSelection
+ mode.vector.outmode
+ Writing mode for the output vector file
+ This allows to set the writing behaviour for the output vector file. Please note that the actual behaviour depends on the file format.
+
+
+ ulco
+ ovw
+ ulovw
+ ulu
+
+
+ 0
+
+
+ ParameterRaster
+ mode.vector.inmask
+ Mask Image
+ Only pixels whose mask value is strictly positive will be segmented.
+ True
+
+
+ ParameterBoolean
+ mode.vector.neighbor
+ 8-neighbor connectivity
+ Activate 8-Neighborhood connectivity (default is 4).
+ True
+
+
+ ParameterBoolean
+ mode.vector.stitch
+ Stitch polygons
+ Scan polygons on each side of tiles and stitch polygons which connect by more than one pixel.
+ True
+
+
+ ParameterNumber
+ mode.vector.minsize
+ Minimum object size
+ Objects whose size is below the minimum object size (area in pixels) will be ignored during vectorization.
+
+
+ 1
+
+
+ ParameterNumber
+ mode.vector.simplify
+ Simplify polygons
+ Simplify polygons according to a given tolerance (in pixel). This option allows to reduce the size of the output file or database.
+
+
+ 0.1
+
+
+ ParameterString
+ mode.vector.layername
+ Layer name
+ Name of the layer in the vector file or database (default is Layer).
+ layer
+
+ False
+
+
+ ParameterString
+ mode.vector.fieldname
+ Geometry index field name
+ Name of the field holding the geometry index in the output vector file or database.
+ DN
+
+ False
+
+
+ ParameterNumber
+ mode.vector.tilesize
+ Tiles size
+ User defined tiles size for tile-based segmentation. Optimal tile size is selected according to available RAM if null.
+
+
+ 1024
+
+
+ ParameterNumber
+ mode.vector.startlabel
+ Starting geometry index
+ Starting value of the geometry index field
+
+
+ 1
+
+
+ ParameterString
+ mode.vector.ogroptions
+ OGR options for layer creation
+ A list of layer creation options in the form KEY=VALUE that will be passed directly to OGR without any validity checking. Options may depend on the file format, and can be found in OGR documentation.
+
+
+ True
+
+
diff --git a/python/plugins/processing/otb/description/Segmentation-edison.xml b/python/plugins/processing/otb/description/Segmentation-edison.xml
new file mode 100644
index 000000000000..e02eac849bf6
--- /dev/null
+++ b/python/plugins/processing/otb/description/Segmentation-edison.xml
@@ -0,0 +1,180 @@
+
+ Segmentation-edison
+ otbcli_Segmentation
+ Segmentation (edison)
+ Segmentation
+ Performs segmentation of an image, and output either a raster or a vector file. In vector mode, large input datasets are supported.
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to segment
+ False
+
+
+ ParameterSelection
+ filter
+ Segmentation algorithm
+ Choice of segmentation algorithm (mean-shift by default)
+
+
+ edison
+
+
+ 0
+
+
+ ParameterNumber
+ filter.edison.spatialr
+ Spatial radius
+ Spatial radius defining neighborhood.
+
+
+ 5
+
+
+ ParameterNumber
+ filter.edison.ranger
+ Range radius
+ Range radius defining the radius (expressed in radiometry unit) in the multi-spectral space.
+
+
+ 15
+
+
+ ParameterNumber
+ filter.edison.minsize
+ Minimum region size
+ Minimum size of a region in segmentation. Smaller clusters will be merged to the neighboring cluster with the closest radiometry.
+
+
+ 100
+
+
+ ParameterNumber
+ filter.edison.scale
+ Scale factor
+ Scaling of the image before processing. This is useful for images with narrow decimal ranges (like [0,1] for instance).
+
+
+ 1
+
+
+ ParameterSelection
+ mode
+ Processing mode
+ Choice of processing mode, either raster or large-scale.
+
+
+ vector
+
+
+ 0
+
+
+ OutputVector
+ mode.vector.out
+ Output vector file
+ The output vector file or database (name can be anything understood by OGR)
+
+
+
+ ParameterSelection
+ mode.vector.outmode
+ Writing mode for the output vector file
+ This allows to set the writing behaviour for the output vector file. Please note that the actual behaviour depends on the file format.
+
+
+ ulco
+ ovw
+ ulovw
+ ulu
+
+
+ 0
+
+
+ ParameterRaster
+ mode.vector.inmask
+ Mask Image
+ Only pixels whose mask value is strictly positive will be segmented.
+ True
+
+
+ ParameterBoolean
+ mode.vector.neighbor
+ 8-neighbor connectivity
+ Activate 8-Neighborhood connectivity (default is 4).
+ True
+
+
+ ParameterBoolean
+ mode.vector.stitch
+ Stitch polygons
+ Scan polygons on each side of tiles and stitch polygons which connect by more than one pixel.
+ True
+
+
+ ParameterNumber
+ mode.vector.minsize
+ Minimum object size
+ Objects whose size is below the minimum object size (area in pixels) will be ignored during vectorization.
+
+
+ 1
+
+
+ ParameterNumber
+ mode.vector.simplify
+ Simplify polygons
+ Simplify polygons according to a given tolerance (in pixel). This option allows to reduce the size of the output file or database.
+
+
+ 0.1
+
+
+ ParameterString
+ mode.vector.layername
+ Layer name
+ Name of the layer in the vector file or database (default is Layer).
+ layer
+
+ False
+
+
+ ParameterString
+ mode.vector.fieldname
+ Geometry index field name
+ Name of the field holding the geometry index in the output vector file or database.
+ DN
+
+ False
+
+
+ ParameterNumber
+ mode.vector.tilesize
+ Tiles size
+ User defined tiles size for tile-based segmentation. Optimal tile size is selected according to available RAM if null.
+
+
+ 1024
+
+
+ ParameterNumber
+ mode.vector.startlabel
+ Starting geometry index
+ Starting value of the geometry index field
+
+
+ 1
+
+
+ ParameterString
+ mode.vector.ogroptions
+ OGR options for layer creation
+ A list of layer creation options in the form KEY=VALUE that will be passed directly to OGR without any validity checking. Options may depend on the file format, and can be found in OGR documentation.
+
+
+ True
+
+
diff --git a/python/plugins/processing/otb/description/Segmentation-meanshift.xml b/python/plugins/processing/otb/description/Segmentation-meanshift.xml
new file mode 100644
index 000000000000..7f067343c9e4
--- /dev/null
+++ b/python/plugins/processing/otb/description/Segmentation-meanshift.xml
@@ -0,0 +1,189 @@
+
+ Segmentation-meanshift
+ otbcli_Segmentation
+ Segmentation (meanshift)
+ Segmentation
+ Performs segmentation of an image, and output either a raster or a vector file. In vector mode, large input datasets are supported.
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to segment
+ False
+
+
+ ParameterSelection
+ filter
+ Segmentation algorithm
+ Choice of segmentation algorithm (mean-shift by default)
+
+
+ meanshift
+
+
+ 0
+
+
+ ParameterNumber
+ filter.meanshift.spatialr
+ Spatial radius
+ Spatial radius of the neighborhood.
+
+
+ 5
+
+
+ ParameterNumber
+ filter.meanshift.ranger
+ Range radius
+ Range radius defining the radius (expressed in radiometry unit) in the multispectral space.
+
+
+ 15
+
+
+ ParameterNumber
+ filter.meanshift.thres
+ Mode convergence threshold
+ Algorithm iterative scheme will stop if mean-shift vector is below this threshold or if iteration number reached maximum number of iterations.
+
+
+ 0.1
+
+
+ ParameterNumber
+ filter.meanshift.maxiter
+ Maximum number of iterations
+ Algorithm iterative scheme will stop if convergence hasn't been reached after the maximum number of iterations.
+
+
+ 100
+
+
+ ParameterNumber
+ filter.meanshift.minsize
+ Minimum region size
+ Minimum size of a region (in pixel unit) in segmentation. Smaller clusters will be merged to the neighboring cluster with the closest radiometry. If set to 0 no pruning is done.
+
+
+ 100
+
+
+ ParameterSelection
+ mode
+ Processing mode
+ Choice of processing mode, either raster or large-scale.
+
+
+ vector
+
+
+ 0
+
+
+ OutputVector
+ mode.vector.out
+ Output vector file
+ The output vector file or database (name can be anything understood by OGR)
+
+
+
+ ParameterSelection
+ mode.vector.outmode
+ Writing mode for the output vector file
+ This allows to set the writing behaviour for the output vector file. Please note that the actual behaviour depends on the file format.
+
+
+ ulco
+ ovw
+ ulovw
+ ulu
+
+
+ 0
+
+
+ ParameterRaster
+ mode.vector.inmask
+ Mask Image
+ Only pixels whose mask value is strictly positive will be segmented.
+ True
+
+
+ ParameterBoolean
+ mode.vector.neighbor
+ 8-neighbor connectivity
+ Activate 8-Neighborhood connectivity (default is 4).
+ True
+
+
+ ParameterBoolean
+ mode.vector.stitch
+ Stitch polygons
+ Scan polygons on each side of tiles and stitch polygons which connect by more than one pixel.
+ True
+
+
+ ParameterNumber
+ mode.vector.minsize
+ Minimum object size
+ Objects whose size is below the minimum object size (area in pixels) will be ignored during vectorization.
+
+
+ 1
+
+
+ ParameterNumber
+ mode.vector.simplify
+ Simplify polygons
+ Simplify polygons according to a given tolerance (in pixel). This option allows to reduce the size of the output file or database.
+
+
+ 0.1
+
+
+ ParameterString
+ mode.vector.layername
+ Layer name
+ Name of the layer in the vector file or database (default is Layer).
+ layer
+
+ False
+
+
+ ParameterString
+ mode.vector.fieldname
+ Geometry index field name
+ Name of the field holding the geometry index in the output vector file or database.
+ DN
+
+ False
+
+
+ ParameterNumber
+ mode.vector.tilesize
+ Tiles size
+ User defined tiles size for tile-based segmentation. Optimal tile size is selected according to available RAM if null.
+
+
+ 1024
+
+
+ ParameterNumber
+ mode.vector.startlabel
+ Starting geometry index
+ Starting value of the geometry index field
+
+
+ 1
+
+
+ ParameterString
+ mode.vector.ogroptions
+ OGR options for layer creation
+ A list of layer creation options in the form KEY=VALUE that will be passed directly to OGR without any validity checking. Options may depend on the file format, and can be found in OGR documentation.
+
+
+ True
+
+
diff --git a/python/plugins/processing/otb/description/Segmentation-mprofiles.xml b/python/plugins/processing/otb/description/Segmentation-mprofiles.xml
new file mode 100644
index 000000000000..6fc14a7c3858
--- /dev/null
+++ b/python/plugins/processing/otb/description/Segmentation-mprofiles.xml
@@ -0,0 +1,180 @@
+
+ Segmentation-mprofiles
+ otbcli_Segmentation
+ Segmentation (mprofiles)
+ Segmentation
+ Performs segmentation of an image, and output either a raster or a vector file. In vector mode, large input datasets are supported.
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to segment
+ False
+
+
+ ParameterSelection
+ filter
+ Segmentation algorithm
+ Choice of segmentation algorithm (mean-shift by default)
+
+
+ mprofiles
+
+
+ 0
+
+
+ ParameterNumber
+ filter.mprofiles.size
+ Profile Size
+ Size of the profiles
+
+
+ 5
+
+
+ ParameterNumber
+ filter.mprofiles.start
+ Initial radius
+ Initial radius of the structuring element (in pixels)
+
+
+ 1
+
+
+ ParameterNumber
+ filter.mprofiles.step
+ Radius step.
+ Radius step along the profile (in pixels)
+
+
+ 1
+
+
+ ParameterNumber
+ filter.mprofiles.sigma
+ Threshold of the final decision rule
+ Profiles values under the threshold will be ignored.
+
+
+ 1
+
+
+ ParameterSelection
+ mode
+ Processing mode
+ Choice of processing mode, either raster or large-scale.
+
+
+ vector
+
+
+ 0
+
+
+ OutputVector
+ mode.vector.out
+ Output vector file
+ The output vector file or database (name can be anything understood by OGR)
+
+
+
+ ParameterSelection
+ mode.vector.outmode
+ Writing mode for the output vector file
+ This allows to set the writing behaviour for the output vector file. Please note that the actual behaviour depends on the file format.
+
+
+ ulco
+ ovw
+ ulovw
+ ulu
+
+
+ 0
+
+
+ ParameterRaster
+ mode.vector.inmask
+ Mask Image
+ Only pixels whose mask value is strictly positive will be segmented.
+ True
+
+
+ ParameterBoolean
+ mode.vector.neighbor
+ 8-neighbor connectivity
+ Activate 8-Neighborhood connectivity (default is 4).
+ True
+
+
+ ParameterBoolean
+ mode.vector.stitch
+ Stitch polygons
+ Scan polygons on each side of tiles and stitch polygons which connect by more than one pixel.
+ True
+
+
+ ParameterNumber
+ mode.vector.minsize
+ Minimum object size
+ Objects whose size is below the minimum object size (area in pixels) will be ignored during vectorization.
+
+
+ 1
+
+
+ ParameterNumber
+ mode.vector.simplify
+ Simplify polygons
+ Simplify polygons according to a given tolerance (in pixel). This option allows to reduce the size of the output file or database.
+
+
+ 0.1
+
+
+ ParameterString
+ mode.vector.layername
+ Layer name
+ Name of the layer in the vector file or database (default is Layer).
+ layer
+
+ False
+
+
+ ParameterString
+ mode.vector.fieldname
+ Geometry index field name
+ Name of the field holding the geometry index in the output vector file or database.
+ DN
+
+ False
+
+
+ ParameterNumber
+ mode.vector.tilesize
+ Tiles size
+ User defined tiles size for tile-based segmentation. Optimal tile size is selected according to available RAM if null.
+
+
+ 1024
+
+
+ ParameterNumber
+ mode.vector.startlabel
+ Starting geometry index
+ Starting value of the geometry index field
+
+
+ 1
+
+
+ ParameterString
+ mode.vector.ogroptions
+ OGR options for layer creation
+ A list of layer creation options in the form KEY=VALUE that will be passed directly to OGR without any validity checking. Options may depend on the file format, and can be found in OGR documentation.
+
+
+ True
+
+
diff --git a/python/plugins/processing/otb/description/Segmentation-watershed.xml b/python/plugins/processing/otb/description/Segmentation-watershed.xml
new file mode 100644
index 000000000000..7ef004e4f4fc
--- /dev/null
+++ b/python/plugins/processing/otb/description/Segmentation-watershed.xml
@@ -0,0 +1,162 @@
+
+ Segmentation-watershed
+ otbcli_Segmentation
+ Segmentation (watershed)
+ Segmentation
+ Performs segmentation of an image, and output either a raster or a vector file. In vector mode, large input datasets are supported.
+
+ ParameterRaster
+ in
+ Input Image
+ The input image to segment
+ False
+
+
+ ParameterSelection
+ filter
+ Segmentation algorithm
+ Choice of segmentation algorithm (mean-shift by default)
+
+
+ watershed
+
+
+ 0
+
+
+ ParameterNumber
+ filter.watershed.threshold
+ Depth Threshold
+ Depth threshold Units in percentage of the maximum depth in the image.
+
+
+ 0.01
+
+
+ ParameterNumber
+ filter.watershed.level
+ Flood Level
+ flood level for generating the merge tree from the initial segmentation (between 0 and 1)
+
+
+ 0.1
+
+
+ ParameterSelection
+ mode
+ Processing mode
+ Choice of processing mode, either raster or large-scale.
+
+
+ vector
+
+
+ 0
+
+
+ OutputVector
+ mode.vector.out
+ Output vector file
+ The output vector file or database (name can be anything understood by OGR)
+
+
+
+ ParameterSelection
+ mode.vector.outmode
+ Writing mode for the output vector file
+ This allows to set the writing behaviour for the output vector file. Please note that the actual behaviour depends on the file format.
+
+
+ ulco
+ ovw
+ ulovw
+ ulu
+
+
+ 0
+
+
+ ParameterRaster
+ mode.vector.inmask
+ Mask Image
+ Only pixels whose mask value is strictly positive will be segmented.
+ True
+
+
+ ParameterBoolean
+ mode.vector.neighbor
+ 8-neighbor connectivity
+ Activate 8-Neighborhood connectivity (default is 4).
+ True
+
+
+ ParameterBoolean
+ mode.vector.stitch
+ Stitch polygons
+ Scan polygons on each side of tiles and stitch polygons which connect by more than one pixel.
+ True
+
+
+ ParameterNumber
+ mode.vector.minsize
+ Minimum object size
+ Objects whose size is below the minimum object size (area in pixels) will be ignored during vectorization.
+
+
+ 1
+
+
+ ParameterNumber
+ mode.vector.simplify
+ Simplify polygons
+ Simplify polygons according to a given tolerance (in pixel). This option allows to reduce the size of the output file or database.
+
+
+ 0.1
+
+
+ ParameterString
+ mode.vector.layername
+ Layer name
+ Name of the layer in the vector file or database (default is Layer).
+ layer
+
+ False
+
+
+ ParameterString
+ mode.vector.fieldname
+ Geometry index field name
+ Name of the field holding the geometry index in the output vector file or database.
+ DN
+
+ False
+
+
+ ParameterNumber
+ mode.vector.tilesize
+ Tiles size
+ User defined tiles size for tile-based segmentation. Optimal tile size is selected according to available RAM if null.
+
+
+ 1024
+
+
+ ParameterNumber
+ mode.vector.startlabel
+ Starting geometry index
+ Starting value of the geometry index field
+
+
+ 1
+
+
+ ParameterString
+ mode.vector.ogroptions
+ OGR options for layer creation
+ A list of layer creation options in the form KEY=VALUE that will be passed directly to OGR without any validity checking. Options may depend on the file format, and can be found in OGR documentation.
+
+
+ True
+
+
diff --git a/python/plugins/processing/otb/description/SimpleConnectedComponentsSegmentation.txt b/python/plugins/processing/otb/description/SimpleConnectedComponentsSegmentation.txt
deleted file mode 100644
index 63f40c8137ce..000000000000
--- a/python/plugins/processing/otb/description/SimpleConnectedComponentsSegmentation.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-SimpleConnectedComponentsSegmentation
-otbcli_Segmentation
-Simple Connected Components segmentation (labeled raster output)
-Segmentation
-ParameterRaster|-filter cc -in|Input Image|False
-ParameterString|-filter.cc.expr|Condition|
-OutputRaster|-mode raster -mode.raster.out|Output labeled image
-
diff --git a/python/plugins/processing/otb/description/SimpleConnectedComponentsSegmentation_vector.txt b/python/plugins/processing/otb/description/SimpleConnectedComponentsSegmentation_vector.txt
deleted file mode 100644
index 658d2efba648..000000000000
--- a/python/plugins/processing/otb/description/SimpleConnectedComponentsSegmentation_vector.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-SimpleConnectedComponentsSegmentationVector
-otbcli_Segmentation
-Simple Connected Components segmentation (large-scale, vector output)
-Segmentation
-ParameterRaster|-filter cc -in|Input Image|False
-ParameterString|-filter.cc.expr|Condition|
-OutputVector|-mode vector -mode.vector.out|Output vector file
-ParameterVector|-mode.vector.inmask|Mask Image|-1|True
-ParameterBoolean|-mode.vector.neighbor|8-neighbor connectivity|False
-ParameterBoolean|-mode.vector.stitch|Stitch polygons|True
-ParameterNumber|-mode.vector.minsize|Minimum object size|1|None|1
-ParameterNumber|-mode.vector.simplify|Simplify polygons|None|None|0.0
-ParameterString|-mode.vector.layername|Layer name |layer
-ParemeterString|-mode.vector.fieldname|Geometry index field name|DN
-ParameterNumber|-mode.vector.tilesize|Tile size|0|None|1024
-ParameterNumber|-mode.vector.startlabel|Starting geometry index|1|None|1
-ParameterSelection|-mode.vector.outmode|Writing mode (update file/overwrite file/overwrite layer/update layer)|ulco;ovw;ulovw;ulu|0
-ParameterString|-mode.vector.ogroptions|OGR options for layer creation|
diff --git a/python/plugins/processing/otb/description/Smoothing-anidif.xml b/python/plugins/processing/otb/description/Smoothing-anidif.xml
new file mode 100644
index 000000000000..565747cda838
--- /dev/null
+++ b/python/plugins/processing/otb/description/Smoothing-anidif.xml
@@ -0,0 +1,60 @@
+
+ Smoothing-anidif
+ otbcli_Smoothing
+ Smoothing (anidif)
+ Image Filtering
+ Apply a smoothing filter to an image
+
+ ParameterRaster
+ in
+ Input Image
+ Input image to smooth.
+ False
+
+
+ OutputRaster
+ out
+ Output Image
+ Output smoothed image.
+
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterSelection
+ type
+ Smoothing Type
+ Smoothing kernel to apply
+
+
+ anidif
+
+
+ 2
+
+
+ ParameterNumber
+ type.anidif.timestep
+ Time Step
+ Diffusion equation time step
+
+
+ 0.125
+
+
+ ParameterNumber
+ type.anidif.nbiter
+ Nb Iterations
+ Number of iterations
+
+
+ 10
+
+
diff --git a/python/plugins/processing/otb/description/Smoothing-gaussian.xml b/python/plugins/processing/otb/description/Smoothing-gaussian.xml
new file mode 100644
index 000000000000..a07675ba03e5
--- /dev/null
+++ b/python/plugins/processing/otb/description/Smoothing-gaussian.xml
@@ -0,0 +1,51 @@
+
+ Smoothing-gaussian
+ otbcli_Smoothing
+ Smoothing (gaussian)
+ Image Filtering
+ Apply a smoothing filter to an image
+
+ ParameterRaster
+ in
+ Input Image
+ Input image to smooth.
+ False
+
+
+ OutputRaster
+ out
+ Output Image
+ Output smoothed image.
+
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterSelection
+ type
+ Smoothing Type
+ Smoothing kernel to apply
+
+
+ gaussian
+
+
+ 2
+
+
+ ParameterNumber
+ type.gaussian.radius
+ Radius
+ Gaussian radius (in pixels)
+
+
+ 2
+
+
diff --git a/python/plugins/processing/otb/description/Smoothing-mean.xml b/python/plugins/processing/otb/description/Smoothing-mean.xml
new file mode 100644
index 000000000000..d093be754662
--- /dev/null
+++ b/python/plugins/processing/otb/description/Smoothing-mean.xml
@@ -0,0 +1,51 @@
+
+ Smoothing-mean
+ otbcli_Smoothing
+ Smoothing (mean)
+ Image Filtering
+ Apply a smoothing filter to an image
+
+ ParameterRaster
+ in
+ Input Image
+ Input image to smooth.
+ False
+
+
+ OutputRaster
+ out
+ Output Image
+ Output smoothed image.
+
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
+ ParameterSelection
+ type
+ Smoothing Type
+ Smoothing kernel to apply
+
+
+ mean
+
+
+ 2
+
+
+ ParameterNumber
+ type.mean.radius
+ Radius
+ Mean radius (in pixels)
+
+
+ 2
+
+
diff --git a/python/plugins/processing/otb/description/Smoothing.txt b/python/plugins/processing/otb/description/Smoothing.txt
deleted file mode 100644
index 417be6429745..000000000000
--- a/python/plugins/processing/otb/description/Smoothing.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Smoothing
-otbcli_Smoothing
-Smoothing
-Image Filtering
-ParameterRaster|-in|Input Image|False
-OutputRaster|-out|Output Image
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
-ParameterSelection|-type|Smoothing Type|mean;gaussian;anidif|2
-ParameterNumber|-type.mean.radius|Radius|None|None|2
-ParameterNumber|-type.gaussian.radius|Radius|None|None|2
-ParameterNumber|-type.anidif.timestep|Time Step|None|None|0.125
-ParameterNumber|-type.anidif.nbiter|Nb Iterations|None|None|10
diff --git a/python/plugins/processing/otb/description/SplitImage.txt b/python/plugins/processing/otb/description/SplitImage.txt
deleted file mode 100644
index be27aa391306..000000000000
--- a/python/plugins/processing/otb/description/SplitImage.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SplitImage
-otbcli_SplitImage
-Split Image
-Image Manipulation
-ParameterRaster|-in|Input Image|False
-OutputFile|-out|Output Image
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
diff --git a/python/plugins/processing/otb/description/SplitImage.xml b/python/plugins/processing/otb/description/SplitImage.xml
new file mode 100644
index 000000000000..298a935096ae
--- /dev/null
+++ b/python/plugins/processing/otb/description/SplitImage.xml
@@ -0,0 +1,30 @@
+
+ SplitImage
+ otbcli_SplitImage
+ Split Image
+ Image Manipulation
+ Split a N multiband image into N images
+
+ ParameterRaster
+ in
+ Input Image
+ Input multiband image filename.
+ False
+
+
+ OutputFile
+ out
+ Output Image
+ Output filename that will be used to get the prefix and the extension of the output images to write
+
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
diff --git a/python/plugins/processing/otb/description/StereoFramework.xml b/python/plugins/processing/otb/description/StereoFramework.xml
new file mode 100644
index 000000000000..f85f2400655c
--- /dev/null
+++ b/python/plugins/processing/otb/description/StereoFramework.xml
@@ -0,0 +1,315 @@
+
+ StereoFramework
+ otbcli_StereoFramework
+ Stereo Framework
+ Stereo
+ Compute the ground elevation based on one or multiple stereo pair(s)
+
+ ParameterMultipleInput
+ input.il
+ Input images list
+ The list of images.
+
+ False
+
+
+ ParameterString
+ input.co
+ Couples list
+ List of index of couples im image list. Couples must be separated by a comma. (index start at 0). for example : 0 1,1 2 will process a first couple composed of the first and the second image in image list, then the first and the third image
+. note that images are handled by pairs. if left empty couples are created from input index i.e. a first couple will be composed of the first and second image, a second couple with third and fourth image etc. (in this case image list must be even).
+
+
+ True
+
+
+ ParameterNumber
+ input.channel
+ Image channel used for the block matching
+ Used channel for block matching (used for all images)
+
+
+ 1
+
+
+ ParameterNumber
+ elev.default
+ Default elevation
+ This parameter allows to set the default height above ellipsoid when there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles, and no geoid file has been set. This is also used by some application as an average elevation value.
+
+
+ 0
+
+
+ ParameterNumber
+ output.res
+ Output resolution
+ Spatial sampling distance of the output elevation : the cell size (in m)
+
+
+ 1
+
+
+ ParameterNumber
+ output.nodata
+ NoData value
+ DSM empty cells are filled with this value (optional -32768 by default)
+
+
+ -32768
+
+
+ ParameterSelection
+ output.fusionmethod
+ Method to fuse measures in each DSM cell
+ This parameter allows to choose the method used to fuse elevation measurements in each output DSM cell
+
+
+ max
+ min
+ mean
+ acc
+
+
+ 0
+
+
+ OutputRaster
+ output.out
+ Output DSM
+ Output elevation image
+
+
+
+ ParameterSelection
+ output.mode
+ Parameters estimation modes
+
+
+
+ fit
+ user
+
+
+ 0
+
+
+ ParameterNumber
+ output.mode.user.ulx
+ Upper Left X
+ Cartographic X coordinate of upper-left corner (meters for cartographic projections, degrees for geographic ones)
+
+
+ 0.0
+
+
+ ParameterNumber
+ output.mode.user.uly
+ Upper Left Y
+ Cartographic Y coordinate of the upper-left corner (meters for cartographic projections, degrees for geographic ones)
+
+
+ 0.0
+
+
+ ParameterNumber
+ output.mode.user.sizex
+ Size X
+ Size of projected image along X (in pixels)
+
+
+ 0
+
+
+ ParameterNumber
+ output.mode.user.sizey
+ Size Y
+ Size of projected image along Y (in pixels)
+
+
+ 0
+
+
+ ParameterNumber
+ output.mode.user.spacingx
+ Pixel Size X
+ Size of each pixel along X axis (meters for cartographic projections, degrees for geographic ones)
+
+
+ 0.0
+
+
+ ParameterNumber
+ output.mode.user.spacingy
+ Pixel Size Y
+ Size of each pixel along Y axis (meters for cartographic projections, degrees for geographic ones)
+
+
+ 0.0
+
+
+ ParameterSelection
+ map
+ Output Cartographic Map Projection
+ Parameters of the output map projection to be used.
+
+
+ utm
+ lambert2
+ lambert93
+ wgs
+ epsg
+
+
+ 3
+
+
+ ParameterNumber
+ map.utm.zone
+ Zone number
+ The zone number ranges from 1 to 60 and allows to define the transverse mercator projection (along with the hemisphere)
+
+
+ 31
+
+
+ ParameterBoolean
+ map.utm.northhem
+ Northern Hemisphere
+ The transverse mercator projections are defined by their zone number as well as the hemisphere. Activate this parameter if your image is in the northern hemisphere.
+ True
+
+
+ ParameterNumber
+ map.epsg.code
+ EPSG Code
+ See www.spatialreference.org to find which EPSG code is associated to your projection
+
+
+ 4326
+
+
+ ParameterNumber
+ stereorect.fwdgridstep
+ Step of the deformation grid (in pixels)
+ Stereo-rectification deformation grid only varies slowly. Therefore, it is recommended to use a coarser grid (higher step value) in case of large images
+
+
+ 16
+
+
+ ParameterNumber
+ stereorect.invgridssrate
+ Sub-sampling rate for epipolar grid inversion
+ Grid inversion is an heavy process that implies spline regression on control points. To avoid eating to much memory, this parameter allows to first sub-sample the field to invert.
+
+
+ 10
+
+
+ ParameterSelection
+ bm.metric
+ Block-matching metric
+
+
+
+ ssdmean
+ ssd
+ ncc
+ lp
+
+
+ 0
+
+
+ ParameterNumber
+ bm.metric.lp.p
+ p value
+ Value of the p parameter in Lp pseudo-norm (must be positive)
+
+
+ 1
+
+
+ ParameterNumber
+ bm.radius
+ Radius of blocks for matching filter (in pixels)
+ The radius of blocks in Block-Matching (in pixels)
+
+
+ 2
+
+
+ ParameterNumber
+ bm.minhoffset
+ Minimum altitude offset (in meters)
+ Minimum altitude below the selected elevation source (in meters)
+
+
+ -20
+
+
+ ParameterNumber
+ bm.maxhoffset
+ Maximum altitude offset (in meters)
+ Maximum altitude above the selected elevation source (in meters)
+
+
+ 20
+
+
+ ParameterBoolean
+ postproc.bij
+ Use bijection consistency in block matching strategy
+ use bijection consistency. Right to Left correlation is computed to validate Left to Right disparities. If bijection is not found pixel is rejected.
+ True
+
+
+ ParameterBoolean
+ postproc.med
+ Use median disparities filtering
+ disparities output can be filtered using median post filtering (disabled by default).
+ True
+
+
+ ParameterNumber
+ postproc.metrict
+ Correlation metric threshold
+ Use block matching metric output to discard pixels with low correlation value (disabled by default, float value)
+
+
+ 0.6
+
+
+ ParameterRaster
+ mask.left
+ Input left mask
+ Mask for left input image
+ True
+
+
+ ParameterRaster
+ mask.right
+ Input right mask
+ Mask for right input image
+ True
+
+
+ ParameterNumber
+ mask.variancet
+ Discard pixels with low local variance
+ This parameter allows to discard pixels whose local variance is too small (the size of the neighborhood is given by the radius parameter)
+
+
+ 50
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
diff --git a/python/plugins/processing/otb/description/StereoRectificationGridGenerator.txt b/python/plugins/processing/otb/description/StereoRectificationGridGenerator.txt
deleted file mode 100644
index 0da27dc1f4f7..000000000000
--- a/python/plugins/processing/otb/description/StereoRectificationGridGenerator.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-StereoRectificationGridGenerator
-otbcli_StereoRectificationGridGenerator
-Stereo-rectification deformation grid generator
-Geometry
-ParameterRaster|-io.inleft|Left input image|False
-ParameterRaster|-io.inright|Right input image|False
-OutputRaster|-io.outleft|Left output deformation grid
-OutputRaster|-io.outright|Right output deformation grid
-ParameterSelection|-epi.elevation|Elevation management|avg;avgdem;dem|0
-ParameterNumber|-epi.elevation.avg.value|Average elevation value|None|None|0.0
-ParameterFile|-epi.elevation.avgdem.path|DEM directory|
-ParameterFile|-epi.elevation.avgdem.geoid|Geoid file||
-ParameterNumber|-epi.elevation.avgdem.step|Sub-sampling step|None|None|1
-ParameterNumber|-epi.elevation.avgdem.value|Average elevation value|None|None|0.0
-ParameterNumber|-epi.elevation.avgdem.mindisp|Minimum disparity from DEM|None|None|0.0
-ParameterNumber|-epi.elevation.avgdem.maxdisp|Maximum disparity from DEM|None|None|0.0
-ParameterFile|-epi.elevation.dem.path|DEM directory|
-ParameterFile|-epi.elevation.dem.geoid|Geoid file||
-ParameterNumber|-epi.scale|Scale of epipolar images|None|None|1.0
-ParameterNumber|-epi.step|Step of the deformation grid (in nb. of pixels)|None|None|1
-ParameterNumber|-epi.rectsizex|Rectified image size X|None|None|0
-ParameterNumber|-epi.rectsizey|Rectified image size Y|None|None|0
-ParameterNumber|-epi.baseline|Mean baseline ratio|None|None|0.0
-OutputRaster|-inverse.outleft|Left inverse deformation grid
-OutputRaster|-inverse.outright|Right inverse deformation grid
-ParameterNumber|-inverse.ssrate|Sub-sampling rate for inversion|None|None|16
diff --git a/python/plugins/processing/otb/description/StereoSensorModelToElevationMap.txt b/python/plugins/processing/otb/description/StereoSensorModelToElevationMap.txt
deleted file mode 100644
index 8ae0950dcab0..000000000000
--- a/python/plugins/processing/otb/description/StereoSensorModelToElevationMap.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-StereoSensorModelToElevationMap
-otbcli_StereoSensorModelToElevationMap
-Stereo sensor model to elevation map
-Stereo
-ParameterRaster|-ref|Reference|False
-ParameterRaster|-sec|Secondary|False
-OutputRaster|-out|Output Image
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
-ParameterNumber|-r|Radius|None|None|3
-ParameterNumber|-ct|Correlation Threshold|None|None|0.699999988079
-ParameterNumber|-vt|Variance Threshold|None|None|4.0
-ParameterNumber|-minh|MinHeightOffset|None|None|-20.0
-ParameterNumber|-maxh|MaxHeightOffset|None|None|20.0
-ParameterNumber|-step|HeightStep|None|None|1.0
-ParameterNumber|-ae|AverageElevation|None|None|0.0
-ParameterSelection|-elev|Elevation management|dem;average|1
-ParameterFile|-elev.dem.path|DEM directory|
-ParameterFile|-elev.dem.geoid|Geoid File||
-ParameterNumber|-elev.average.value|Average Elevation|None|None|0.0
-ParameterNumber|-rgs|ReferenceGaussianSmoothing|None|None|1.0
-ParameterNumber|-sgs|SecondaryGaussianSmoothing|None|None|1.0
-ParameterBoolean|-s|SubtractInitialHeight|
diff --git a/python/plugins/processing/otb/description/Superimpose.txt b/python/plugins/processing/otb/description/Superimpose.txt
deleted file mode 100644
index 98da6734b597..000000000000
--- a/python/plugins/processing/otb/description/Superimpose.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-Superimpose
-otbcli_Superimpose
-Superimpose sensor
-Geometry
-ParameterRaster|-inr|Reference input|False
-ParameterRaster|-inm|The image to reproject|False
-ParameterSelection|-elev|Elevation management|dem;average|1
-ParameterFile|-elev.dem.path|DEM directory|
-ParameterFile|-elev.dem.geoid|Geoid File||
-ParameterNumber|-elev.average.value|Average Elevation|None|None|0.0
-ParameterNumber|-lms|Spacing of the deformation field|None|None|4.0
-OutputRaster|-out|Output image
-ParameterNumber|-ram|Available RAM (Mb)|None|None|128
diff --git a/python/plugins/processing/otb/description/Superimpose.xml b/python/plugins/processing/otb/description/Superimpose.xml
new file mode 100644
index 000000000000..d9c4aa3faba9
--- /dev/null
+++ b/python/plugins/processing/otb/description/Superimpose.xml
@@ -0,0 +1,78 @@
+
+ Superimpose
+ otbcli_Superimpose
+ Superimpose sensor
+ Geometry
+ Using available image metadata, project one image onto another one
+
+ ParameterRaster
+ inr
+ Reference input
+ The input reference image.
+ False
+
+
+ ParameterRaster
+ inm
+ The image to reproject
+ The image to reproject into the geometry of the reference input.
+ False
+
+
+ ParameterNumber
+ elev.default
+ Default elevation
+ This parameter allows to set the default height above ellipsoid when there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles, and no geoid file has been set. This is also used by some application as an average elevation value.
+
+
+ 0
+
+
+ ParameterNumber
+ lms
+ Spacing of the deformation field
+ Generate a coarser deformation field with the given spacing
+
+
+ 4
+
+
+ OutputRaster
+ out
+ Output image
+ Output reprojected image.
+
+
+
+ ParameterSelection
+ interpolator
+ Interpolation
+ This group of parameters allows to define how the input image will be interpolated during resampling.
+
+
+ bco
+ nn
+ linear
+
+
+ 0
+
+
+ ParameterNumber
+ interpolator.bco.radius
+ Radius for bicubic interpolation
+ This parameter allows to control the size of the bicubic interpolation filter. If the target pixel size is higher than the input pixel size, increasing this parameter will reduce aliasing artefacts.
+
+
+ 2
+
+
+ ParameterNumber
+ ram
+ Available RAM (Mb)
+ Available memory for processing (in MB)
+
+
+ 128
+
+
diff --git a/python/plugins/processing/otb/description/TileFusion.xml b/python/plugins/processing/otb/description/TileFusion.xml
new file mode 100644
index 000000000000..1a9ca2292207
--- /dev/null
+++ b/python/plugins/processing/otb/description/TileFusion.xml
@@ -0,0 +1,40 @@
+
+ TileFusion
+ otbcli_TileFusion
+ Image Tile Fusion
+ Image Manipulation
+ Fusion of an image made of several tile files.
+
+ ParameterMultipleInput
+ il
+ Input Tile Images
+ Input tiles to concatenate (in lexicographic order : (0,0) (1,0) (0,1) (1,1)).
+
+ False
+
+
+ ParameterNumber
+ cols
+ Number of tile columns
+ Number of columns in the tile array
+
+
+ 0
+
+
+ ParameterNumber
+ rows
+ Number of tile rows
+ Number of rows in the tile array
+
+
+ 0
+
+
+ OutputRaster
+ out
+ Output Image
+ Output entire image
+
+
+
diff --git a/python/plugins/processing/otb/description/TrainImagesClassifier-ann.xml b/python/plugins/processing/otb/description/TrainImagesClassifier-ann.xml
new file mode 100644
index 000000000000..42a82ed6f59e
--- /dev/null
+++ b/python/plugins/processing/otb/description/TrainImagesClassifier-ann.xml
@@ -0,0 +1,240 @@
+
+ TrainImagesClassifier-ann
+ otbcli_TrainImagesClassifier
+ TrainImagesClassifier (ann)
+ Learning
+ Train a classifier from multiple pairs of images and training vector data.
+
+ ParameterMultipleInput
+ io.il
+ Input Image List
+ A list of input images.
+
+ False
+
+
+ ParameterMultipleInput
+ io.vd
+ Input Vector Data List
+ A list of vector data to select the training samples.
+
+ False
+
+
+ ParameterFile
+ io.imstat
+ Input XML image statistics file
+ Input XML file containing the mean and the standard deviation of the input images.
+
+ True
+
+
+ OutputFile
+ io.confmatout
+ Output confusion matrix
+ Output file containing the confusion matrix (.csv format).
+
+
+
+ OutputFile
+ io.out
+ Output model
+ Output file containing the model estimated (.txt format).
+
+
+
+ ParameterNumber
+ elev.default
+ Default elevation
+ This parameter allows to set the default height above ellipsoid when there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles, and no geoid file has been set. This is also used by some application as an average elevation value.
+
+
+ 0
+
+
+ ParameterNumber
+ sample.mt
+ Maximum training sample size per class
+ Maximum size per class (in pixels) of the training sample list (default = 1000) (no limit = -1). If equal to -1, then the maximal size of the available training sample list per class will be equal to the surface area of the smallest class multiplied by the training sample ratio.
+
+
+ 1000
+
+
+ ParameterNumber
+ sample.mv
+ Maximum validation sample size per class
+ Maximum size per class (in pixels) of the validation sample list (default = 1000) (no limit = -1). If equal to -1, then the maximal size of the available validation sample list per class will be equal to the surface area of the smallest class multiplied by the validation sample ratio.
+
+
+ 1000
+
+
+ ParameterBoolean
+ sample.edg
+ On edge pixel inclusion
+ Takes pixels on polygon edge into consideration when building training and validation samples.
+ True
+
+
+ ParameterNumber
+ sample.vtr
+ Training and validation sample ratio
+ Ratio between training and validation samples (0.0 = all training, 1.0 = all validation) (default = 0.5).
+
+
+ 0.5
+
+
+ ParameterString
+ sample.vfn
+ Name of the discrimination field
+ Name of the field used to discriminate class labels in the input vector data files.
+ Class
+
+ False
+
+
+ ParameterSelection
+ classifier
+ Classifier to use for the training
+ Choice of the classifier to use for the training.
+
+
+ ann
+
+
+ 0
+
+
+ ParameterSelection
+ classifier.ann.t
+ Train Method Type
+ Type of training method for the multilayer perceptron (MLP) neural network.
+
+
+ reg
+ back
+
+
+ 0
+
+
+ ParameterString
+ classifier.ann.sizes
+ Number of neurons in each intermediate layer
+ The number of neurons in each intermediate layer (excluding input and output layers).
+
+
+ False
+
+
+ ParameterSelection
+ classifier.ann.f
+ Neuron activation function type
+ Neuron activation function.
+
+
+ ident
+ sig
+ gau
+
+
+ 1
+
+
+ ParameterNumber
+ classifier.ann.a
+ Alpha parameter of the activation function
+ Alpha parameter of the activation function (used only with sigmoid and gaussian functions).
+
+
+ 1
+
+
+ ParameterNumber
+ classifier.ann.b
+ Beta parameter of the activation function
+ Beta parameter of the activation function (used only with sigmoid and gaussian functions).
+
+
+ 1
+
+
+ ParameterNumber
+ classifier.ann.bpdw
+ Strength of the weight gradient term in the BACKPROP method
+ Strength of the weight gradient term in the BACKPROP method. The recommended value is about 0.1.
+
+
+ 0.1
+
+
+ ParameterNumber
+ classifier.ann.bpms
+ Strength of the momentum term (the difference between weights on the 2 previous iterations)
+ Strength of the momentum term (the difference between weights on the 2 previous iterations). This parameter provides some inertia to smooth the random fluctuations of the weights. It can vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough.
+
+
+ 0.1
+
+
+ ParameterNumber
+ classifier.ann.rdw
+ Initial value Delta_0 of update-values Delta_{ij} in RPROP method
+ Initial value Delta_0 of update-values Delta_{ij} in RPROP method (default = 0.1).
+
+
+ 0.1
+
+
+ ParameterNumber
+ classifier.ann.rdwm
+ Update-values lower limit Delta_{min} in RPROP method
+ Update-values lower limit Delta_{min} in RPROP method. It must be positive (default = 1e-7).
+
+
+ 1e-07
+
+
+ ParameterSelection
+ classifier.ann.term
+ Termination criteria
+ Termination criteria.
+
+
+ iter
+ eps
+ all
+
+
+ 2
+
+
+ ParameterNumber
+ classifier.ann.eps
+ Epsilon value used in the Termination criteria
+ Epsilon value used in the Termination criteria.
+
+
+ 0.01
+
+
+ ParameterNumber
+ classifier.ann.iter
+ Maximum number of iterations used in the Termination criteria
+ Maximum number of iterations used in the Termination criteria.
+
+
+ 1000
+
+
+ ParameterNumber
+ rand
+ Set user defined seed
+ Set a specific seed with an integer value.
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/TrainImagesClassifier-bayes.xml b/python/plugins/processing/otb/description/TrainImagesClassifier-bayes.xml
new file mode 100644
index 000000000000..3366123704be
--- /dev/null
+++ b/python/plugins/processing/otb/description/TrainImagesClassifier-bayes.xml
@@ -0,0 +1,118 @@
+
+ TrainImagesClassifier-bayes
+ otbcli_TrainImagesClassifier
+ TrainImagesClassifier (bayes)
+ Learning
+ Train a classifier from multiple pairs of images and training vector data.
+
+ ParameterMultipleInput
+ io.il
+ Input Image List
+ A list of input images.
+
+ False
+
+
+ ParameterMultipleInput
+ io.vd
+ Input Vector Data List
+ A list of vector data to select the training samples.
+
+ False
+
+
+ ParameterFile
+ io.imstat
+ Input XML image statistics file
+ Input XML file containing the mean and the standard deviation of the input images.
+
+ True
+
+
+ OutputFile
+ io.confmatout
+ Output confusion matrix
+ Output file containing the confusion matrix (.csv format).
+
+
+
+ OutputFile
+ io.out
+ Output model
+ Output file containing the model estimated (.txt format).
+
+
+
+ ParameterNumber
+ elev.default
+ Default elevation
+ This parameter allows to set the default height above ellipsoid when there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles, and no geoid file has been set. This is also used by some application as an average elevation value.
+
+
+ 0
+
+
+ ParameterNumber
+ sample.mt
+ Maximum training sample size per class
+ Maximum size per class (in pixels) of the training sample list (default = 1000) (no limit = -1). If equal to -1, then the maximal size of the available training sample list per class will be equal to the surface area of the smallest class multiplied by the training sample ratio.
+
+
+ 1000
+
+
+ ParameterNumber
+ sample.mv
+ Maximum validation sample size per class
+ Maximum size per class (in pixels) of the validation sample list (default = 1000) (no limit = -1). If equal to -1, then the maximal size of the available validation sample list per class will be equal to the surface area of the smallest class multiplied by the validation sample ratio.
+
+
+ 1000
+
+
+ ParameterBoolean
+ sample.edg
+ On edge pixel inclusion
+ Takes pixels on polygon edge into consideration when building training and validation samples.
+ True
+
+
+ ParameterNumber
+ sample.vtr
+ Training and validation sample ratio
+ Ratio between training and validation samples (0.0 = all training, 1.0 = all validation) (default = 0.5).
+
+
+ 0.5
+
+
+ ParameterString
+ sample.vfn
+ Name of the discrimination field
+ Name of the field used to discriminate class labels in the input vector data files.
+ Class
+
+ False
+
+
+ ParameterSelection
+ classifier
+ Classifier to use for the training
+ Choice of the classifier to use for the training.
+
+
+ bayes
+
+
+ 0
+
+
+ ParameterNumber
+ rand
+ Set user defined seed
+ Set a specific seed with an integer value.
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/TrainImagesClassifier-boost.xml b/python/plugins/processing/otb/description/TrainImagesClassifier-boost.xml
new file mode 100644
index 000000000000..29a85340f5e8
--- /dev/null
+++ b/python/plugins/processing/otb/description/TrainImagesClassifier-boost.xml
@@ -0,0 +1,160 @@
+
+ TrainImagesClassifier-boost
+ otbcli_TrainImagesClassifier
+ TrainImagesClassifier (boost)
+ Learning
+ Train a classifier from multiple pairs of images and training vector data.
+
+ ParameterMultipleInput
+ io.il
+ Input Image List
+ A list of input images.
+
+ False
+
+
+ ParameterMultipleInput
+ io.vd
+ Input Vector Data List
+ A list of vector data to select the training samples.
+
+ False
+
+
+ ParameterFile
+ io.imstat
+ Input XML image statistics file
+ Input XML file containing the mean and the standard deviation of the input images.
+
+ True
+
+
+ OutputFile
+ io.confmatout
+ Output confusion matrix
+ Output file containing the confusion matrix (.csv format).
+
+
+
+ OutputFile
+ io.out
+ Output model
+ Output file containing the model estimated (.txt format).
+
+
+
+ ParameterNumber
+ elev.default
+ Default elevation
+ This parameter allows to set the default height above ellipsoid when there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles, and no geoid file has been set. This is also used by some application as an average elevation value.
+
+
+ 0
+
+
+ ParameterNumber
+ sample.mt
+ Maximum training sample size per class
+ Maximum size per class (in pixels) of the training sample list (default = 1000) (no limit = -1). If equal to -1, then the maximal size of the available training sample list per class will be equal to the surface area of the smallest class multiplied by the training sample ratio.
+
+
+ 1000
+
+
+ ParameterNumber
+ sample.mv
+ Maximum validation sample size per class
+ Maximum size per class (in pixels) of the validation sample list (default = 1000) (no limit = -1). If equal to -1, then the maximal size of the available validation sample list per class will be equal to the surface area of the smallest class multiplied by the validation sample ratio.
+
+
+ 1000
+
+
+ ParameterBoolean
+ sample.edg
+ On edge pixel inclusion
+ Takes pixels on polygon edge into consideration when building training and validation samples.
+ True
+
+
+ ParameterNumber
+ sample.vtr
+ Training and validation sample ratio
+ Ratio between training and validation samples (0.0 = all training, 1.0 = all validation) (default = 0.5).
+
+
+ 0.5
+
+
+ ParameterString
+ sample.vfn
+ Name of the discrimination field
+ Name of the field used to discriminate class labels in the input vector data files.
+ Class
+
+ False
+
+
+ ParameterSelection
+ classifier
+ Classifier to use for the training
+ Choice of the classifier to use for the training.
+
+
+ boost
+
+
+ 0
+
+
+ ParameterSelection
+ classifier.boost.t
+ Boost Type
+ Type of Boosting algorithm.
+
+
+ discrete
+ real
+ logit
+ gentle
+
+
+ 1
+
+
+ ParameterNumber
+ classifier.boost.w
+ Weak count
+ The number of weak classifiers.
+
+
+ 100
+
+
+ ParameterNumber
+ classifier.boost.r
+ Weight Trim Rate
+ A threshold between 0 and 1 used to save computational time. Samples with summary weight <= (1 - weight_trim_rate) do not participate in the next iteration of training. Set this parameter to 0 to turn off this functionality.
+
+
+ 0.95
+
+
+ ParameterNumber
+ classifier.boost.m
+ Maximum depth of the tree
+ Maximum depth of the tree.
+
+
+ 1
+
+
+ ParameterNumber
+ rand
+ Set user defined seed
+ Set a specific seed with an integer value.
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/TrainImagesClassifier-dt.xml b/python/plugins/processing/otb/description/TrainImagesClassifier-dt.xml
new file mode 100644
index 000000000000..447971850a98
--- /dev/null
+++ b/python/plugins/processing/otb/description/TrainImagesClassifier-dt.xml
@@ -0,0 +1,177 @@
+
+ TrainImagesClassifier-dt
+ otbcli_TrainImagesClassifier
+ TrainImagesClassifier (dt)
+ Learning
+ Train a classifier from multiple pairs of images and training vector data.
+
+ ParameterMultipleInput
+ io.il
+ Input Image List
+ A list of input images.
+
+ False
+
+
+ ParameterMultipleInput
+ io.vd
+ Input Vector Data List
+ A list of vector data to select the training samples.
+
+ False
+
+
+ ParameterFile
+ io.imstat
+ Input XML image statistics file
+ Input XML file containing the mean and the standard deviation of the input images.
+
+ True
+
+
+ OutputFile
+ io.confmatout
+ Output confusion matrix
+ Output file containing the confusion matrix (.csv format).
+
+
+
+ OutputFile
+ io.out
+ Output model
+ Output file containing the model estimated (.txt format).
+
+
+
+ ParameterNumber
+ elev.default
+ Default elevation
+ This parameter allows to set the default height above ellipsoid when there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles, and no geoid file has been set. This is also used by some application as an average elevation value.
+
+
+ 0
+
+
+ ParameterNumber
+ sample.mt
+ Maximum training sample size per class
+ Maximum size per class (in pixels) of the training sample list (default = 1000) (no limit = -1). If equal to -1, then the maximal size of the available training sample list per class will be equal to the surface area of the smallest class multiplied by the training sample ratio.
+
+
+ 1000
+
+
+ ParameterNumber
+ sample.mv
+ Maximum validation sample size per class
+ Maximum size per class (in pixels) of the validation sample list (default = 1000) (no limit = -1). If equal to -1, then the maximal size of the available validation sample list per class will be equal to the surface area of the smallest class multiplied by the validation sample ratio.
+
+
+ 1000
+
+
+ ParameterBoolean
+ sample.edg
+ On edge pixel inclusion
+ Takes pixels on polygon edge into consideration when building training and validation samples.
+ True
+
+
+ ParameterNumber
+ sample.vtr
+ Training and validation sample ratio
+ Ratio between training and validation samples (0.0 = all training, 1.0 = all validation) (default = 0.5).
+
+
+ 0.5
+
+
+ ParameterString
+ sample.vfn
+ Name of the discrimination field
+ Name of the field used to discriminate class labels in the input vector data files.
+ Class
+
+ False
+
+
+ ParameterSelection
+ classifier
+ Classifier to use for the training
+ Choice of the classifier to use for the training.
+
+
+ dt
+
+
+ 0
+
+
+ ParameterNumber
+ classifier.dt.max
+ Maximum depth of the tree
+ The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.
+
+
+ 65535
+
+
+ ParameterNumber
+ classifier.dt.min
+ Minimum number of samples in each node
+ If the number of samples in a node is smaller than this parameter, then the node will not be split.
+
+
+ 10
+
+
+ ParameterNumber
+ classifier.dt.ra
+ Termination criteria for regression tree
+ If all absolute differences between an estimated value in a node and the values of the train samples in this node are smaller than this regression accuracy parameter, then the node will not be split.
+
+
+ 0.01
+
+
+ ParameterNumber
+ classifier.dt.cat
+ Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split
+ Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.
+
+
+ 10
+
+
+ ParameterNumber
+ classifier.dt.f
+ K-fold cross-validations
+ If cv_folds > 1, then it prunes a tree with K-fold cross-validation where K is equal to cv_folds.
+
+
+ 10
+
+
+ ParameterBoolean
+ classifier.dt.r
+ Set Use1seRule flag to false
+ If true, then a pruning will be harsher. This will make a tree more compact and more resistant to the training data noise but a bit less accurate.
+ True
+
+
+ ParameterBoolean
+ classifier.dt.t
+ Set TruncatePrunedTree flag to false
+ If true, then pruned branches are physically removed from the tree.
+ True
+
+
+ ParameterNumber
+ rand
+ Set user defined seed
+ Set a specific seed with an integer value.
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/TrainImagesClassifier-gbt.xml b/python/plugins/processing/otb/description/TrainImagesClassifier-gbt.xml
new file mode 100644
index 000000000000..33483a0cf10c
--- /dev/null
+++ b/python/plugins/processing/otb/description/TrainImagesClassifier-gbt.xml
@@ -0,0 +1,154 @@
+
+ TrainImagesClassifier-gbt
+ otbcli_TrainImagesClassifier
+ TrainImagesClassifier (gbt)
+ Learning
+ Train a classifier from multiple pairs of images and training vector data.
+
+ ParameterMultipleInput
+ io.il
+ Input Image List
+ A list of input images.
+
+ False
+
+
+ ParameterMultipleInput
+ io.vd
+ Input Vector Data List
+ A list of vector data to select the training samples.
+
+ False
+
+
+ ParameterFile
+ io.imstat
+ Input XML image statistics file
+ Input XML file containing the mean and the standard deviation of the input images.
+
+ True
+
+
+ OutputFile
+ io.confmatout
+ Output confusion matrix
+ Output file containing the confusion matrix (.csv format).
+
+
+
+ OutputFile
+ io.out
+ Output model
+ Output file containing the model estimated (.txt format).
+
+
+
+ ParameterNumber
+ elev.default
+ Default elevation
+ This parameter allows to set the default height above ellipsoid when there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles, and no geoid file has been set. This is also used by some application as an average elevation value.
+
+
+ 0
+
+
+ ParameterNumber
+ sample.mt
+ Maximum training sample size per class
+ Maximum size per class (in pixels) of the training sample list (default = 1000) (no limit = -1). If equal to -1, then the maximal size of the available training sample list per class will be equal to the surface area of the smallest class multiplied by the training sample ratio.
+
+
+ 1000
+
+
+ ParameterNumber
+ sample.mv
+ Maximum validation sample size per class
+ Maximum size per class (in pixels) of the validation sample list (default = 1000) (no limit = -1). If equal to -1, then the maximal size of the available validation sample list per class will be equal to the surface area of the smallest class multiplied by the validation sample ratio.
+
+
+ 1000
+
+
+ ParameterBoolean
+ sample.edg
+ On edge pixel inclusion
+ Takes pixels on polygon edge into consideration when building training and validation samples.
+ True
+
+
+ ParameterNumber
+ sample.vtr
+ Training and validation sample ratio
+ Ratio between training and validation samples (0.0 = all training, 1.0 = all validation) (default = 0.5).
+
+
+ 0.5
+
+
+ ParameterString
+ sample.vfn
+ Name of the discrimination field
+ Name of the field used to discriminate class labels in the input vector data files.
+ Class
+
+ False
+
+
+ ParameterSelection
+ classifier
+ Classifier to use for the training
+ Choice of the classifier to use for the training.
+
+
+ gbt
+
+
+ 0
+
+
+ ParameterNumber
+ classifier.gbt.w
+ Number of boosting algorithm iterations
+ Number "w" of boosting algorithm iterations, with w*K being the total number of trees in the GBT model, where K is the output number of classes.
+
+
+ 200
+
+
+ ParameterNumber
+ classifier.gbt.s
+ Regularization parameter
+ Regularization parameter.
+
+
+ 0.01
+
+
+ ParameterNumber
+ classifier.gbt.p
+ Portion of the whole training set used for each algorithm iteration
+ Portion of the whole training set used for each algorithm iteration. The subset is generated randomly.
+
+
+ 0.8
+
+
+ ParameterNumber
+ classifier.gbt.max
+ Maximum depth of the tree
+ The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.
+
+
+ 3
+
+
+ ParameterNumber
+ rand
+ Set user defined seed
+ Set a specific seed with an integer value.
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/TrainImagesClassifier-knn.xml b/python/plugins/processing/otb/description/TrainImagesClassifier-knn.xml
new file mode 100644
index 000000000000..fed27c0d5248
--- /dev/null
+++ b/python/plugins/processing/otb/description/TrainImagesClassifier-knn.xml
@@ -0,0 +1,127 @@
+
+ TrainImagesClassifier-knn
+ otbcli_TrainImagesClassifier
+ TrainImagesClassifier (knn)
+ Learning
+ Train a classifier from multiple pairs of images and training vector data.
+
+ ParameterMultipleInput
+ io.il
+ Input Image List
+ A list of input images.
+
+ False
+
+
+ ParameterMultipleInput
+ io.vd
+ Input Vector Data List
+ A list of vector data to select the training samples.
+
+ False
+
+
+ ParameterFile
+ io.imstat
+ Input XML image statistics file
+ Input XML file containing the mean and the standard deviation of the input images.
+
+ True
+
+
+ OutputFile
+ io.confmatout
+ Output confusion matrix
+ Output file containing the confusion matrix (.csv format).
+
+
+
+ OutputFile
+ io.out
+ Output model
+ Output file containing the model estimated (.txt format).
+
+
+
+ ParameterNumber
+ elev.default
+ Default elevation
+ This parameter allows to set the default height above ellipsoid when there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles, and no geoid file has been set. This is also used by some application as an average elevation value.
+
+
+ 0
+
+
+ ParameterNumber
+ sample.mt
+ Maximum training sample size per class
+ Maximum size per class (in pixels) of the training sample list (default = 1000) (no limit = -1). If equal to -1, then the maximal size of the available training sample list per class will be equal to the surface area of the smallest class multiplied by the training sample ratio.
+
+
+ 1000
+
+
+ ParameterNumber
+ sample.mv
+ Maximum validation sample size per class
+ Maximum size per class (in pixels) of the validation sample list (default = 1000) (no limit = -1). If equal to -1, then the maximal size of the available validation sample list per class will be equal to the surface area of the smallest class multiplied by the validation sample ratio.
+
+
+ 1000
+
+
+ ParameterBoolean
+ sample.edg
+ On edge pixel inclusion
+ Takes pixels on polygon edge into consideration when building training and validation samples.
+ True
+
+
+ ParameterNumber
+ sample.vtr
+ Training and validation sample ratio
+ Ratio between training and validation samples (0.0 = all training, 1.0 = all validation) (default = 0.5).
+
+
+ 0.5
+
+
+ ParameterString
+ sample.vfn
+ Name of the discrimination field
+ Name of the field used to discriminate class labels in the input vector data files.
+ Class
+
+ False
+
+
+ ParameterSelection
+ classifier
+ Classifier to use for the training
+ Choice of the classifier to use for the training.
+
+
+ knn
+
+
+ 0
+
+
+ ParameterNumber
+ classifier.knn.k
+ Number of Neighbors
+ The number of neighbors to use.
+
+
+ 32
+
+
+ ParameterNumber
+ rand
+ Set user defined seed
+ Set a specific seed with an integer value.
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/TrainImagesClassifier-libsvm.xml b/python/plugins/processing/otb/description/TrainImagesClassifier-libsvm.xml
new file mode 100644
index 000000000000..37352811ce97
--- /dev/null
+++ b/python/plugins/processing/otb/description/TrainImagesClassifier-libsvm.xml
@@ -0,0 +1,149 @@
+
+ TrainImagesClassifier-libsvm
+ otbcli_TrainImagesClassifier
+ TrainImagesClassifier (libsvm)
+ Learning
+ Train a classifier from multiple pairs of images and training vector data.
+
+ ParameterMultipleInput
+ io.il
+ Input Image List
+ A list of input images.
+
+ False
+
+
+ ParameterMultipleInput
+ io.vd
+ Input Vector Data List
+ A list of vector data to select the training samples.
+
+ False
+
+
+ ParameterFile
+ io.imstat
+ Input XML image statistics file
+ Input XML file containing the mean and the standard deviation of the input images.
+
+ True
+
+
+ OutputFile
+ io.confmatout
+ Output confusion matrix
+ Output file containing the confusion matrix (.csv format).
+
+
+
+ OutputFile
+ io.out
+ Output model
+ Output file containing the model estimated (.txt format).
+
+
+
+ ParameterNumber
+ elev.default
+ Default elevation
+ This parameter allows to set the default height above ellipsoid when there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles, and no geoid file has been set. This is also used by some application as an average elevation value.
+
+
+ 0
+
+
+ ParameterNumber
+ sample.mt
+ Maximum training sample size per class
+ Maximum size per class (in pixels) of the training sample list (default = 1000) (no limit = -1). If equal to -1, then the maximal size of the available training sample list per class will be equal to the surface area of the smallest class multiplied by the training sample ratio.
+
+
+ 1000
+
+
+ ParameterNumber
+ sample.mv
+ Maximum validation sample size per class
+ Maximum size per class (in pixels) of the validation sample list (default = 1000) (no limit = -1). If equal to -1, then the maximal size of the available validation sample list per class will be equal to the surface area of the smallest class multiplied by the validation sample ratio.
+
+
+ 1000
+
+
+ ParameterBoolean
+ sample.edg
+ On edge pixel inclusion
+ Takes pixels on polygon edge into consideration when building training and validation samples.
+ True
+
+
+ ParameterNumber
+ sample.vtr
+ Training and validation sample ratio
+ Ratio between training and validation samples (0.0 = all training, 1.0 = all validation) (default = 0.5).
+
+
+ 0.5
+
+
+ ParameterString
+ sample.vfn
+ Name of the discrimination field
+ Name of the field used to discriminate class labels in the input vector data files.
+ Class
+
+ False
+
+
+ ParameterSelection
+ classifier
+ Classifier to use for the training
+ Choice of the classifier to use for the training.
+
+
+ libsvm
+
+
+ 0
+
+
+ ParameterSelection
+ classifier.libsvm.k
+ SVM Kernel Type
+ SVM Kernel Type.
+
+
+ linear
+ rbf
+ poly
+ sigmoid
+
+
+ 0
+
+
+ ParameterNumber
+ classifier.libsvm.c
+ Cost parameter C
+ SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.
+
+
+ 1
+
+
+ ParameterBoolean
+ classifier.libsvm.opt
+ Parameters optimization
+ SVM parameters optimization flag.
+ True
+
+
+ ParameterNumber
+ rand
+ Set user defined seed
+ Set a specific seed with an integer value.
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/TrainImagesClassifier-rf.xml b/python/plugins/processing/otb/description/TrainImagesClassifier-rf.xml
new file mode 100644
index 000000000000..457197ebe615
--- /dev/null
+++ b/python/plugins/processing/otb/description/TrainImagesClassifier-rf.xml
@@ -0,0 +1,181 @@
+
+ TrainImagesClassifier-rf
+ otbcli_TrainImagesClassifier
+ TrainImagesClassifier (rf)
+ Learning
+ Train a classifier from multiple pairs of images and training vector data.
+
+ ParameterMultipleInput
+ io.il
+ Input Image List
+ A list of input images.
+
+ False
+
+
+ ParameterMultipleInput
+ io.vd
+ Input Vector Data List
+ A list of vector data to select the training samples.
+
+ False
+
+
+ ParameterFile
+ io.imstat
+ Input XML image statistics file
+ Input XML file containing the mean and the standard deviation of the input images.
+
+ True
+
+
+ OutputFile
+ io.confmatout
+ Output confusion matrix
+ Output file containing the confusion matrix (.csv format).
+
+
+
+ OutputFile
+ io.out
+ Output model
+ Output file containing the model estimated (.txt format).
+
+
+
+ ParameterNumber
+ elev.default
+ Default elevation
+ This parameter allows to set the default height above ellipsoid when there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles, and no geoid file has been set. This is also used by some application as an average elevation value.
+
+
+ 0
+
+
+ ParameterNumber
+ sample.mt
+ Maximum training sample size per class
+ Maximum size per class (in pixels) of the training sample list (default = 1000) (no limit = -1). If equal to -1, then the maximal size of the available training sample list per class will be equal to the surface area of the smallest class multiplied by the training sample ratio.
+
+
+ 1000
+
+
+ ParameterNumber
+ sample.mv
+ Maximum validation sample size per class
+ Maximum size per class (in pixels) of the validation sample list (default = 1000) (no limit = -1). If equal to -1, then the maximal size of the available validation sample list per class will be equal to the surface area of the smallest class multiplied by the validation sample ratio.
+
+
+ 1000
+
+
+ ParameterBoolean
+ sample.edg
+ On edge pixel inclusion
+ Takes pixels on polygon edge into consideration when building training and validation samples.
+ True
+
+
+ ParameterNumber
+ sample.vtr
+ Training and validation sample ratio
+ Ratio between training and validation samples (0.0 = all training, 1.0 = all validation) (default = 0.5).
+
+
+ 0.5
+
+
+ ParameterString
+ sample.vfn
+ Name of the discrimination field
+ Name of the field used to discriminate class labels in the input vector data files.
+ Class
+
+ False
+
+
+ ParameterSelection
+ classifier
+ Classifier to use for the training
+ Choice of the classifier to use for the training.
+
+
+ rf
+
+
+ 0
+
+
+ ParameterNumber
+ classifier.rf.max
+ Maximum depth of the tree
+ The depth of the tree. A low value will likely underfit and conversely a high value will likely overfit. The optimal value can be obtained using cross validation or other suitable methods.
+
+
+ 5
+
+
+ ParameterNumber
+ classifier.rf.min
+ Minimum number of samples in each node
+ If the number of samples in a node is smaller than this parameter, then the node will not be split. A reasonable value is a small percentage of the total data e.g. 1 percent.
+
+
+ 10
+
+
+ ParameterNumber
+ classifier.rf.ra
+ Termination Criteria for regression tree
+ If all absolute differences between an estimated value in a node and the values of the train samples in this node are smaller than this regression accuracy parameter, then the node will not be split.
+
+
+ 0
+
+
+ ParameterNumber
+ classifier.rf.cat
+ Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split
+ Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.
+
+
+ 10
+
+
+ ParameterNumber
+ classifier.rf.var
+ Size of the randomly selected subset of features at each tree node
+ The size of the subset of features, randomly selected at each tree node, that are used to find the best split(s). If you set it to 0, then the size will be set to the square root of the total number of features.
+
+
+ 0
+
+
+ ParameterNumber
+ classifier.rf.nbtrees
+ Maximum number of trees in the forest
+ The maximum number of trees in the forest. Typically, the more trees you have, the better the accuracy. However, the improvement in accuracy generally diminishes and reaches an asymptote for a certain number of trees. Also to keep in mind, increasing the number of trees increases the prediction time linearly.
+
+
+ 100
+
+
+ ParameterNumber
+ classifier.rf.acc
+ Sufficient accuracy (OOB error)
+ Sufficient accuracy (OOB error).
+
+
+ 0.01
+
+
+ ParameterNumber
+ rand
+ Set user defined seed
+ Set a specific seed with an integer value.
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/TrainImagesClassifier-svm.xml b/python/plugins/processing/otb/description/TrainImagesClassifier-svm.xml
new file mode 100644
index 000000000000..c135e851b771
--- /dev/null
+++ b/python/plugins/processing/otb/description/TrainImagesClassifier-svm.xml
@@ -0,0 +1,202 @@
+
+ TrainImagesClassifier-svm
+ otbcli_TrainImagesClassifier
+ TrainImagesClassifier (svm)
+ Learning
+ Train a classifier from multiple pairs of images and training vector data.
+
+ ParameterMultipleInput
+ io.il
+ Input Image List
+ A list of input images.
+
+ False
+
+
+ ParameterMultipleInput
+ io.vd
+ Input Vector Data List
+ A list of vector data to select the training samples.
+
+ False
+
+
+ ParameterFile
+ io.imstat
+ Input XML image statistics file
+ Input XML file containing the mean and the standard deviation of the input images.
+
+ True
+
+
+ OutputFile
+ io.confmatout
+ Output confusion matrix
+ Output file containing the confusion matrix (.csv format).
+
+
+
+ OutputFile
+ io.out
+ Output model
+ Output file containing the model estimated (.txt format).
+
+
+
+ ParameterNumber
+ elev.default
+ Default elevation
+ This parameter allows to set the default height above ellipsoid when there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles, and no geoid file has been set. This is also used by some application as an average elevation value.
+
+
+ 0
+
+
+ ParameterNumber
+ sample.mt
+ Maximum training sample size per class
+ Maximum size per class (in pixels) of the training sample list (default = 1000) (no limit = -1). If equal to -1, then the maximal size of the available training sample list per class will be equal to the surface area of the smallest class multiplied by the training sample ratio.
+
+
+ 1000
+
+
+ ParameterNumber
+ sample.mv
+ Maximum validation sample size per class
+ Maximum size per class (in pixels) of the validation sample list (default = 1000) (no limit = -1). If equal to -1, then the maximal size of the available validation sample list per class will be equal to the surface area of the smallest class multiplied by the validation sample ratio.
+
+
+ 1000
+
+
+ ParameterBoolean
+ sample.edg
+ On edge pixel inclusion
+ Takes pixels on polygon edge into consideration when building training and validation samples.
+ True
+
+
+ ParameterNumber
+ sample.vtr
+ Training and validation sample ratio
+ Ratio between training and validation samples (0.0 = all training, 1.0 = all validation) (default = 0.5).
+
+
+ 0.5
+
+
+ ParameterString
+ sample.vfn
+ Name of the discrimination field
+ Name of the field used to discriminate class labels in the input vector data files.
+ Class
+
+ False
+
+
+ ParameterSelection
+ classifier
+ Classifier to use for the training
+ Choice of the classifier to use for the training.
+
+
+ svm
+
+
+ 0
+
+
+ ParameterSelection
+ classifier.svm.m
+ SVM Model Type
+ Type of SVM formulation.
+
+
+ csvc
+ nusvc
+ oneclass
+
+
+ 0
+
+
+ ParameterSelection
+ classifier.svm.k
+ SVM Kernel Type
+ SVM Kernel Type.
+
+
+ linear
+ rbf
+ poly
+ sigmoid
+
+
+ 0
+
+
+ ParameterNumber
+ classifier.svm.c
+ Cost parameter C
+ SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.
+
+
+ 1
+
+
+ ParameterNumber
+ classifier.svm.nu
+ Parameter nu of a SVM optimization problem (NU_SVC / ONE_CLASS)
+ Parameter nu of a SVM optimization problem.
+
+
+ 0
+
+
+ ParameterNumber
+ classifier.svm.coef0
+ Parameter coef0 of a kernel function (POLY / SIGMOID)
+ Parameter coef0 of a kernel function (POLY / SIGMOID).
+
+
+ 0
+
+
+ ParameterNumber
+ classifier.svm.gamma
+ Parameter gamma of a kernel function (POLY / RBF / SIGMOID)
+ Parameter gamma of a kernel function (POLY / RBF / SIGMOID).
+
+
+ 1
+
+
+ ParameterNumber
+ classifier.svm.degree
+ Parameter degree of a kernel function (POLY)
+ Parameter degree of a kernel function (POLY).
+
+
+ 1
+
+
+ ParameterBoolean
+ classifier.svm.opt
+ Parameters optimization
+ SVM parameters optimization flag.
+-If set to True, then the optimal SVM parameters will be estimated. Parameters are considered optimal by OpenCV when the cross-validation estimate of the test set error is minimal. Finally, the SVM training process is computed 10 times with these optimal parameters over subsets corresponding to 1/10th of the training samples using the k-fold cross-validation (with k = 10).
+-If set to False, the SVM classification process will be computed once with the currently set input SVM parameters over the training samples.
+-Thus, even with identical input SVM parameters and a similar random seed, the output SVM models will be different according to the method used (optimized or not) because the samples are not identically processed within OpenCV.
+ True
+
+
+ ParameterNumber
+ rand
+ set user defined seed
+ Set specific seed with integer value.
+
+
+ 0
+
+
diff --git a/python/plugins/processing/otb/description/TrainSVMImagesClassifier.txt b/python/plugins/processing/otb/description/TrainSVMImagesClassifier.txt
deleted file mode 100644
index 72a36ea1efe7..000000000000
--- a/python/plugins/processing/otb/description/TrainSVMImagesClassifier.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-TrainSVMImagesClassifier
-otbcli_TrainSVMImagesClassifier
-Train SVM classifier from multiple images
-Learning
-ParameterMultipleInput|-io.il|Input Image List|3|False
-ParameterMultipleInput|-io.vd|Vector Data List|-1|False
-ParameterFile|-io.imstat|XML image statistics file|
-OutputFile|-io.out|Output SVM model
-*ParameterSelection|-elev|Elevation management|dem;average|1
-*ParameterFile|-elev.dem|DEM directory|
-*ParameterFile|-elev.geoid|Geoid File|
-*ParameterNumber|-elev.default|Average Elevation|None|None|0.0
-*ParameterNumber|-sample.mt|Maximum training sample size|None|None|-1
-*ParameterNumber|-sample.mv|Maximum validation sample size|None|None|-1
-*ParameterBoolean|-sample.edg|On edge pixel inclusion|
-ParameterNumber|-sample.vtr|training and validation sample ratio|None|None|0.5
-ParameterString|-sample.vfn|Name of the discrimination field|Class
-ParameterSelection|-svm.k|SVM Kernel Type|linear;rbf;poly;sigmoid|0
-ParameterNumber|-svm.c|Cost parameter C.|None|None|1.0
-ParameterBoolean|-svm.opt|parameters optimization|
-ParameterNumber|-rand|set user defined seed|None|None|0
diff --git a/python/plugins/processing/otb/description/ValidateSVMImagesClassifier.txt b/python/plugins/processing/otb/description/ValidateSVMImagesClassifier.txt
deleted file mode 100644
index b29ac04fd4de..000000000000
--- a/python/plugins/processing/otb/description/ValidateSVMImagesClassifier.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-ValidateSVMImagesClassifier
-otbcli_ValidateSVMImagesClassifier
-Validate SVM Images Classifier
-Learning
-ParameterMultipleInput|-il|Input Image List|3|False
-ParameterMultipleInput|-vd|Vector Data List|-1|False
-ParameterFile|-imstat|XML image statistics file||
-ParameterSelection|-elev|Elevation management|dem;average|1
-ParameterFile|-elev.dem.path|DEM directory|
-ParameterFile|-elev.dem.geoid|Geoid File||
-ParameterNumber|-elev.average.value|Average Elevation|None|None|0.0
-OutputFile|-out|Output filename
-ParameterFile|-svm|SVM validation filename||
-ParameterNumber|-rand|set user defined seed|None|None|0
diff --git a/python/plugins/processing/otb/description/VectorDataDSValidation.txt b/python/plugins/processing/otb/description/VectorDataDSValidation.txt
deleted file mode 100644
index 64478cb3a9a8..000000000000
--- a/python/plugins/processing/otb/description/VectorDataDSValidation.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-VectorDataDSValidation
-otbcli_VectorDataDSValidation
-Vector Data validation
-Feature Extraction
-ParameterVector|-in|Input Vector Data|-1|False
-ParameterFile|-descmod|Descriptors model filename||
-ParameterString|-cri|Criterion|((Belief + Plausibility)/2.)
-ParameterNumber|-thd|Criterion threshold|None|None|0.5
-OutputVector|-out|Output Vector Data
diff --git a/python/plugins/processing/otb/description/VectorDataExtractROIApplication.txt b/python/plugins/processing/otb/description/VectorDataExtractROIApplication.txt
deleted file mode 100644
index 34cc96e55786..000000000000
--- a/python/plugins/processing/otb/description/VectorDataExtractROIApplication.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-VectorDataExtractROIApplication
-otbcli_VectorDataExtractROIApplication
-VectorData Extract ROI
-Vector Data Manipulation
-ParameterVector|-io.vd|Input Vector data|-1|False
-ParameterRaster|-io.in|Support image|False
-OutputVector|-io.out|Output Vector data
-ParameterSelection|-elev|Elevation management|dem;average|1
-ParameterFile|-elev.dem.path|DEM directory|
-ParameterFile|-elev.dem.geoid|Geoid File||
-ParameterNumber|-elev.average.value|Average Elevation|None|None|0.0
diff --git a/python/plugins/processing/otb/description/VectorDataReprojection.txt b/python/plugins/processing/otb/description/VectorDataReprojection.txt
deleted file mode 100644
index ed7e343e50f6..000000000000
--- a/python/plugins/processing/otb/description/VectorDataReprojection.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-VectorDataReprojection
-otbcli_VectorDataReprojection
-Vector Data reprojection
-Vector Data Manipulation
-ParameterFile|-in.vd|Input vector data||
-ParameterRaster|-in.kwl|Use image keywords list|True
-OutputFile|-out.vd|Output vector data
-ParameterSelection|-out.proj|Output Projection choice|image;user|0
-ParameterRaster|-out.proj.image.in|Image used to get projection map|False
-ParameterSelection|-out.proj.user.map|Output Cartographic Map Projection|utm;lambert2;lambert93;wgs;epsg|4
-ParameterNumber|-out.proj.user.map.utm.zone|Zone number|None|None|31
-ParameterBoolean|-out.proj.user.map.utm.northhem|Northern Hemisphere|
-ParameterNumber|-out.proj.user.map.epsg.code|EPSG Code|None|None|32631
diff --git a/python/plugins/processing/otb/description/VectorDataSetField.txt b/python/plugins/processing/otb/description/VectorDataSetField.txt
deleted file mode 100644
index a51262b79294..000000000000
--- a/python/plugins/processing/otb/description/VectorDataSetField.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-VectorDataSetField
-otbcli_VectorDataSetField
-Vector data set field
-Vector Data Manipulation
-ParameterVector|-in|Input|-1|False
-OutputVector|-out|Output
-ParameterString|-fn|Field|
-ParameterString|-fv|Value|
diff --git a/python/plugins/processing/otb/description/VectorDataTransform.txt b/python/plugins/processing/otb/description/VectorDataTransform.txt
deleted file mode 100644
index 57fdb56fd741..000000000000
--- a/python/plugins/processing/otb/description/VectorDataTransform.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-VectorDataTransform
-otbcli_VectorDataTransform
-Vector Data Transformation
-Vector Data Manipulation
-ParameterVector|-vd|Input Vector data|-1|False
-OutputVector|-out|Output Vector data
-ParameterRaster|-in|Support image|False
-ParameterNumber|-transform.tx|Translation X|None|None|0.0
-ParameterNumber|-transform.ty|Translation Y|None|None|0.0
-ParameterNumber|-transform.ro|Rotation Angle|None|None|0.0
-ParameterNumber|-transform.centerx|Center X|None|None|0.0
-ParameterNumber|-transform.centery|Center Y|None|None|0.0
-ParameterNumber|-transform.scale|Scale|None|None|1.0
diff --git a/python/plugins/processing/otb/description/VertexComponentAnalysis.txt b/python/plugins/processing/otb/description/VertexComponentAnalysis.txt
deleted file mode 100644
index 4b86196ce374..000000000000
--- a/python/plugins/processing/otb/description/VertexComponentAnalysis.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-VertexComponentAnalysis
-otbcli_VertexComponentAnalysis
-Vertex Component Analysis
-Miscellaneous
-ParameterRaster|-in|Input Image|False
-ParameterNumber|-ne|Number of endmembers|None|None|1
-OutputRaster|-outendm|Output Endmembers
-ParameterNumber|-rand|set user defined seed|None|None|0
diff --git a/python/plugins/processing/otb/description/WatershedSegmentation.txt b/python/plugins/processing/otb/description/WatershedSegmentation.txt
deleted file mode 100644
index 2f3279fbfa40..000000000000
--- a/python/plugins/processing/otb/description/WatershedSegmentation.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-WatershedSegmentation
-otbcli_Segmentation
-Watershed segmentation (labeled raster output)
-Segmentation
-ParameterRaster|-filter watershed -in|Input Image|False
-ParameterNumber|-filter.watershed.threshold|Depth Threshold|0|None|0.01
-ParameterNumber|-filter.watershed.level|Flood level|0|None|0.1
-OutputRaster|-mode raster -mode.raster.out|Output labeled image
-
diff --git a/python/plugins/processing/otb/description/WatershedSegmentation_vector.txt b/python/plugins/processing/otb/description/WatershedSegmentation_vector.txt
deleted file mode 100644
index 7445e63839a8..000000000000
--- a/python/plugins/processing/otb/description/WatershedSegmentation_vector.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-WatershedSegmentationVector
-otbcli_Segmentation
-Watershed segmentation (large-scale, vector output)
-Segmentation
-ParameterRaster|-filter watershed -in|Input Image|False
-ParameterNumber|-filter.watershed.threshold|Depth Threshold|0|None|0.01
-ParameterNumber|-filter.watershed.level|Flood level|0|None|0.1
-OutputVector|-mode vector -mode.vector.out|Output vector file
-ParameterVector|-mode.vector.inmask|Mask Image|-1|True
-ParameterBoolean|-mode.vector.neighbor|8-neighbor connectivity|False
-ParameterBoolean|-mode.vector.stitch|Stitch polygons|True
-ParameterNumber|-mode.vector.minsize|Minimum object size|1|None|1
-ParameterNumber|-mode.vector.simplify|Simplify polygons|None|None|0.0
-ParameterString|-mode.vector.layername|Layer name |layer
-ParemeterString|-mode.vector.fieldname|Geometry index field name|DN
-ParameterNumber|-mode.vector.tilesize|Tile size|0|None|1024
-ParameterNumber|-mode.vector.startlabel|Starting geometry index|1|None|1
-ParameterSelection|-mode.vector.outmode|Writing mode (update file/overwrite file/overwrite layer/update layer)|ulco;ovw;ulovw;ulu|0
-ParameterString|-mode.vector.ogroptions|OGR options for layer creation|
diff --git a/python/plugins/processing/otb/description/doc/BandMath.html b/python/plugins/processing/otb/description/doc/BandMath.html
index 824ea0921f1d..d51a52a29817 100644
--- a/python/plugins/processing/otb/description/doc/BandMath.html
+++ b/python/plugins/processing/otb/description/doc/BandMath.html
@@ -1,2 +1,6 @@
-
Band Math Application
Brief Description
Perform a mathematical operation on monoband images
Tags
Util
Long Description
This application performs a mathematical operation on monoband images. Mathematical formula interpretation is done via MuPasrer libraries http://muparser.sourceforge.net/
Parameters
[param] Input image list (-il): Image list to perform computation on.
[param] Output Image (-out): Output image.
[param] Available RAM (-ram): Available RAM
[param] Expression (-exp): The mathematical expression to apply.
-Use im1b1 for the first band, im1b2 for the second one...
Perform a mathematical operation on monoband images
Tags
Util
Long Description
This application performs a mathematical operation on monoband images. Mathematical formula interpretation is done via MuParser libraries http://muparser.sourceforge.net/
Parameters
[param] -il <string> Image list to perform computation on.. Mandatory: True. Default Value: "0"
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[param] -exp <string> The mathematical expression to apply.
+Use im1b1 for the first band, im1b2 for the second one.... Mandatory: True. Default Value: ""
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/BinaryMorphologicalOperation-closing.html b/python/plugins/processing/otb/description/doc/BinaryMorphologicalOperation-closing.html
new file mode 100644
index 000000000000..f0147996c389
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/BinaryMorphologicalOperation-closing.html
@@ -0,0 +1,5 @@
+
+
+
BinaryMorphologicalOperation
Brief Description
Performs morphological operations on an input image channel
Tags
MorphologicalOperations,Feature Extraction
Long Description
This application performs binary morphological operations on a mono band image
Parameters
[param] -in <string> The input image to be filtered.. Mandatory: True. Default Value: ""
itkBinaryDilateImageFilter, itkBinaryErodeImageFilter, itkBinaryMorphologicalOpeningImageFilter and itkBinaryMorphologicalClosingImageFilter classes
Example of use
in: qb_RoadExtract.tif
out: opened.tif
channel: 1
structype.ball.xradius: 5
structype.ball.yradius: 5
filter: erode
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/BinaryMorphologicalOperation-dilate.html b/python/plugins/processing/otb/description/doc/BinaryMorphologicalOperation-dilate.html
new file mode 100644
index 000000000000..f0147996c389
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/BinaryMorphologicalOperation-dilate.html
@@ -0,0 +1,5 @@
+
+
+
BinaryMorphologicalOperation
Brief Description
Performs morphological operations on an input image channel
Tags
MorphologicalOperations,Feature Extraction
Long Description
This application performs binary morphological operations on a mono band image
Parameters
[param] -in <string> The input image to be filtered.. Mandatory: True. Default Value: ""
itkBinaryDilateImageFilter, itkBinaryErodeImageFilter, itkBinaryMorphologicalOpeningImageFilter and itkBinaryMorphologicalClosingImageFilter classes
Example of use
in: qb_RoadExtract.tif
out: opened.tif
channel: 1
structype.ball.xradius: 5
structype.ball.yradius: 5
filter: erode
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/BinaryMorphologicalOperation-erode.html b/python/plugins/processing/otb/description/doc/BinaryMorphologicalOperation-erode.html
new file mode 100644
index 000000000000..f0147996c389
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/BinaryMorphologicalOperation-erode.html
@@ -0,0 +1,5 @@
+
+
+
BinaryMorphologicalOperation
Brief Description
Performs morphological operations on an input image channel
Tags
MorphologicalOperations,Feature Extraction
Long Description
This application performs binary morphological operations on a mono band image
Parameters
[param] -in <string> The input image to be filtered.. Mandatory: True. Default Value: ""
itkBinaryDilateImageFilter, itkBinaryErodeImageFilter, itkBinaryMorphologicalOpeningImageFilter and itkBinaryMorphologicalClosingImageFilter classes
Example of use
in: qb_RoadExtract.tif
out: opened.tif
channel: 1
structype.ball.xradius: 5
structype.ball.yradius: 5
filter: erode
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/BinaryMorphologicalOperation-opening.html b/python/plugins/processing/otb/description/doc/BinaryMorphologicalOperation-opening.html
new file mode 100644
index 000000000000..f0147996c389
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/BinaryMorphologicalOperation-opening.html
@@ -0,0 +1,5 @@
+
+
+
BinaryMorphologicalOperation
Brief Description
Performs morphological operations on an input image channel
Tags
MorphologicalOperations,Feature Extraction
Long Description
This application performs binary morphological operations on a mono band image
Parameters
[param] -in <string> The input image to be filtered.. Mandatory: True. Default Value: ""
itkBinaryDilateImageFilter, itkBinaryErodeImageFilter, itkBinaryMorphologicalOpeningImageFilter and itkBinaryMorphologicalClosingImageFilter classes
Example of use
in: qb_RoadExtract.tif
out: opened.tif
channel: 1
structype.ball.xradius: 5
structype.ball.yradius: 5
filter: erode
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/BinaryMorphologicalOperation.html b/python/plugins/processing/otb/description/doc/BinaryMorphologicalOperation.html
new file mode 100644
index 000000000000..f0147996c389
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/BinaryMorphologicalOperation.html
@@ -0,0 +1,5 @@
+
+
+
BinaryMorphologicalOperation
Brief Description
Performs morphological operations on an input image channel
Tags
MorphologicalOperations,Feature Extraction
Long Description
This application performs binary morphological operations on a mono band image
Parameters
[param] -in <string> The input image to be filtered.. Mandatory: True. Default Value: ""
Performs block-matching to estimate pixel-wise disparities between two images
-
Detailed description
-
This application allows to performs block-matching to estimate pixel-wise disparities between two images.
-The application allows to choose the block-matching method to use. It also allows to input masks (related to
-the left and right input image) of pixels for which the disparity should be investigated. Additionally,
-two criteria can be optionally used to disable disparity investigation for some pixel: a no-data
-
-
-value, and a threshold on the local variance. This allows to speed-up computation by avoiding to
-investigate disparities that will not be reliable anyway. For efficiency reasons, if the optimal metric
-values image is desired, it will be concatenated to the output image (which will then have three
-bands : horizontal disparity, vertical disparity and metric value). One can split these images
-afterward.
-
Parameters
-
This section describes in details the parameters available for this application. Table 4.38, page 447
-presents a summary of these parameters and the parameters keys to be used in command-line and
-programming languages. Application key is BlockMatching.
-
-
-
-
-
-
-
-
Parameter key
Parameter type
Parameter description
-
io
Group
Input and output data
-
io.inleft
Input image
Left input image
-
io.inright
Input image
Right input image
-
io.out
Output image
The output disparity map
-
io.outmask
Output image
The output mask corresponding to all
-criterions
-
io.outmetric
Boolean
Output optimal metric values as well
-
mask
Group
Image masking parameters
-
mask.inleft
Input image
Discard left pixels from mask image
-
mask.inright
Input image
Discard right pixels from mask image
-
mask.nodata
Float
Discard pixels with no-data value
-
mask.variancet
Float
Discard pixels with low local variance
-
bm
Group
Block matching parameters
-
bm.metric
Choices
Block-matching metric
-
bm.metric ssd
Choice
Sum of Squared Distances
-
bm.metric ncc
Choice
Normalized Cross-Correlation
bm.metric lp
Choice
Lp pseudo-norm
-
bm.metric.lp.p
Float
p value
-
bm.radius
Int
Radius of blocks
bm.minhd
Int
Minimum horizontal disparity
-
bm.maxhd
Int
Maximum horizontal disparity
-
bm.minvd
Int
Minimum vertical disparity
-
bm.maxvd
Int
Maximum vertical disparity
-
bm.subpixel
Choices
Sub-pixel interpolation
-
bm.subpixel none
Choice
None
-
bm.subpixel parabolic
Choice
Parabolic
-
bm.subpixel triangular
Choice
Triangular
-
bm.subpixel dichotomy
Choice
Dichotomy
-
bm.medianfilter
Group
Median filtering
-
bm.medianfilter.radius
Int
Radius
-
bm.medianfilter.incoherence
Float
Incoherence threshold
-
bm.initdisp
Choices
Initial disparities
-
bm.initdisp none
Choice
None
-
bm.initdisp uniform
Choice
Uniform initial disparity
-
bm.initdisp maps
Choice
Initial disparity maps
-
bm.initdisp.uniform.hdisp
Int
Horizontal initial disparity
-
bm.initdisp.uniform.vdisp
Int
Vertical initial disparity
-
bm.initdisp.uniform.hrad
Int
Horizontal exploration radius
-
bm.initdisp.uniform.vrad
Int
Vertical exploration radius
-
bm.initdisp.maps.hmap
Input image
Horizontal initial disparity map
-
bm.initdisp.maps.vmap
Input image
Vertical initial disparity map
-
bm.initdisp.maps.hrad
Int
Horizontal exploration radius
-
bm.initdisp.maps.vrad
Int
Vertical exploration radius
-
ram
Int
Available RAM (Mb)
-
-
-
Table 4.38: Parameters table for Pixel-wise Block-Matching.
-
-
-
-
-
Input and output data
-This group of parameters allows to set the input and output images.
-
-
Left input image: The left input image (reference)
-
-
Right input image: The right input (secondary)
-
-
The output disparity map: An image containing the estimated disparities as well as the
- metric values if the option is used
-
-
The output mask corresponding to all criterions: A mask image corresponding to all
- citerions (see masking parameters). Only required if variance threshold or nodata criterions
- are set.
-
-
Output optimal metric values as well: If used, the output image will have a second
- component with metric optimal values
-
-
Image masking parameters
-This group of parameters allows to determine the masking parameters to prevent disparities estimation for
-some pixels of the left image
-
-
Discard left pixels from mask image: This parameter allows to provide a custom mask for
- the left image.Block matching will be only perform on pixels inside the mask.
-
-
Discard right pixels from mask image: This parameter allows to provide a custom mask
- for the right image.Block matching will be perform only on pixels inside the mask.
-
-
Discard pixels with no-data value: This parameter allows to discard pixels whose value is
- equal to the user-defined no-data value.
-
-
-
-
Discard pixels with low local variance: This parameter allows to discard pixels whose local
- variance is too small (the size of the neighborhood is given by the radius parameter)
-
-
Block matching parameters
-This group of parameters allow to tune the block-matching behaviour
-
-
Block-matching metric:
-
Available choices are:
-
-
Sum of Squared Distances: Sum of squared distances between pixels value in the metric
- window
-
-
Normalized Cross-Correlation: Normalized Cross-Correlation between the left and
- right windows
-
-
Lp pseudo-norm: Lp pseudo-norm between the left and right windows
-
-
p value: Value of the p parameter in Lp pseudo-norm (must be positive)
-
-
-
Radius of blocks: The radius (in pixels) of blocks in Block-Matching
-
-
Minimum horizontal disparity: Minimum horizontal disparity to explore (can be negative)
-
-
Maximum horizontal disparity: Maximum horizontal disparity to explore (can be
- negative)
-
-
Minimum vertical disparity: Minimum vertical disparity to explore (can be negative)
-
-
Maximum vertical disparity: Maximum vertical disparity to explore (can be negative)
-
-
Sub-pixel interpolation: Estimate disparities with sub-pixel precision
-
Available choices are:
-
-
-
-
None: No sub-pixel
-
-
Parabolic: Parabolic fit
-
-
Triangular: Triangular fit
-
-
Dichotomy: Dichotomic search
-
-
Median filtering: Use a median filter to get a smooth disparity map
-
-
Radius: Radius for median filter
-
-
Incoherence threshold: Incoherence threshold between original and filtered disparity
-
-
-
Initial disparities:
-
Available choices are:
-
-
None: No initial disparity used
-
-
Uniform initial disparity: Use an uniform initial disparity estimate
-
-
Horizontal initial disparity: Value of the uniform horizontal disparity initial
- estimate (in pixels)
-
-
Vertical initial disparity: Value of the uniform vertical disparity initial estimate
- (in pixels)
-
-
Horizontal exploration radius: Horizontal exploration radius around the initial
- disparity estimate (in pixels)
-
-
Vertical exploration radius: Vertical exploration radius around the initial disparity
- estimate (in pixels)
-
-
Initial disparity maps: Use initial disparity maps
-
-
Horizontal initial disparity map: Map of the initial horizontal disparities
-
-
-
-
Vertical initial disparity map: Map of the initial vertical disparities
-
-
Horizontal exploration radius: Horizontal exploration radius around the initial
- disparity estimate (in pixels)
-
-
Vertical exploration radius: Vertical exploration radius around the initial disparity
- estimate (in pixels)
-
-
-
Available RAM (Mb)
-Available memory for processing (in MB)
-
-
Example
-
To run this example in command-line, use the following:
-
Performs block-matching to estimate pixel-wise disparities between two images
Tags
Stereo
Long Description
This application allows to performs block-matching to estimate pixel-wise disparities between two images. The application allows to choose the block-matching method to use. It also allows to input masks (related to the left and right input image) of pixels for which the disparity should be investigated. Additionally, two criteria can be optionally used to disable disparity investigation for some pixel: a no-data value, and a threshold on the local variance. This allows to speed-up computation by avoiding to investigate disparities that will not be reliable anyway. For efficiency reasons, if the optimal metric values image is desired, it will be concatenated to the output image (which will then have three bands : horizontal disparity, vertical disparity and metric value). One can split these images afterward.
Parameters
[param] -io <string> This group of parameters allows to set the input and output images.. Mandatory: True. Default Value: "0"
[param] -mask <string> This group of parameters allows to determine the masking parameters to prevent disparities estimation for some pixels of the left image. Mandatory: True. Default Value: "0"
[param] -bm <string> This group of parameters allow to tune the block-matching behaviour. Mandatory: True. Default Value: "0"
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
Limitations
None
Authors
OTB-Team
See Also
otbStereoRectificationGridGenerator
Example of use
io.inleft: StereoFixed.png
io.inright: StereoMoving.png
bm.minhd: -10
bm.maxhd: 10
mask.variancet: 10
io.out: MyDisparity.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/BundleToPerfectSensor.html b/python/plugins/processing/otb/description/doc/BundleToPerfectSensor.html
index 688c065511d6..2a2e4404838a 100644
--- a/python/plugins/processing/otb/description/doc/BundleToPerfectSensor.html
+++ b/python/plugins/processing/otb/description/doc/BundleToPerfectSensor.html
@@ -1 +1,5 @@
-
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -lms <float> Spacing of the deformation field. Default is 10 times the PAN image spacing.. Mandatory: False. Default Value: "0.0"
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
Limitations
None
Authors
OTB-Team
See Also
Example of use
inp: QB_Toulouse_Ortho_PAN.tif
inxs: QB_Toulouse_Ortho_XS.tif
out: BundleToPerfectSensor.png uchar
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ClassificationMapRegularization.html b/python/plugins/processing/otb/description/doc/ClassificationMapRegularization.html
new file mode 100644
index 000000000000..95a7491bc525
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/ClassificationMapRegularization.html
@@ -0,0 +1,7 @@
+
+
+
ClassificationMapRegularization
Brief Description
Filters the input labeled image using Majority Voting in a ball shaped neighbordhood.
Tags
Learning,Image Analysis
Long Description
This application filters the input labeled image using Majority Voting in a ball shaped neighborhood. Majority Voting takes the more representative value of all the pixels identified by the ball shaped structuring element and then sets the center pixel to this majority label value.
+ -NoData is the label of the NOT classified pixels in the input image. These input pixels keep their NoData label in the output image.
+ -Pixels with more than 1 majority class are marked as Undecided if the parameter 'ip.suvbool == true', or keep their Original labels otherwise.
Parameters
[param] -io <string> This group of parameters allows to set input and output images for classification map regularization by Majority Voting.. Mandatory: True. Default Value: "0"
[param] -ip <string> This group allows to set parameters for classification map regularization by Majority Voting.. Mandatory: True. Default Value: "0"
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
Limitations
The input image must be a single band labeled image. The structuring element radius must have a minimum value equal to 1 pixel. Please note that the Undecided value must be different from existing labels in the input labeled image.
Authors
OTB-Team
See Also
Documentation of the ClassificationMapRegularization application.
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ColorMapping-continuous.html b/python/plugins/processing/otb/description/doc/ColorMapping-continuous.html
new file mode 100644
index 000000000000..a57cae59fbe2
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/ColorMapping-continuous.html
@@ -0,0 +1,13 @@
+
+
+
ColorMapping
Brief Description
Maps an input label image to 8-bits RGB using look-up tables.
This application allows to map a label image to a 8-bits RGB image (in both ways) using different methods.
+ -The custom method allows to use a custom look-up table. The look-up table is loaded from a text file where each line describes an entry. The typical use of this method is to colorise a classification map.
+ -The continuous method allows to map a range of values in a scalar input image to a colored image using continuous look-up table, in order to enhance image interpretation. Several look-up tables can be chosen with different color ranges.
+-The optimal method computes an optimal look-up table. When processing a segmentation label image (label to color), the color difference between adjacent segmented regions is maximized. When processing an unknown color image (color to label), all the present colors are mapped to a continuous label list.
+ - The support image method uses a color support image to associate an average color to each region.
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -op Selection of the operation to execute (default is : label to color). labeltocolor,colortolabel. Mandatory: True. Default Value: "labeltocolor"
[group] -labeltocolor
[group] -colortolabel
[param] -op.colortolabel.notfound <int32> Label to use for unknown colors.. Mandatory: False. Default Value: "404"
[choice] -method Selection of color mapping methods and their parameters. custom,continuous,optimal,image. Mandatory: True. Default Value: "custom"
[group] -custom
[param] -method.custom.lut <string> An ASCII file containing the look-up table
+with one color per line
+(for instance the line '1 255 0 0' means that all pixels with label 1 will be replaced by RGB color 255 0 0)
+Lines beginning with a # are ignored. Mandatory: True. Default Value: ""
[param] -method.continuous.min <float> Set the lower input value of the mapping range.. Mandatory: True. Default Value: "0"
[param] -method.continuous.max <float> Set the higher input value of the mapping range.. Mandatory: True. Default Value: "255"
[group] -optimal
[param] -method.optimal.background <int32> Value of the background label. Mandatory: True. Default Value: "0"
[group] -image
[param] -method.image.in <string> Support image filename. LUT is calculated using the mean of pixel value on the area. First of all image is normalized with extrema rejection. Mandatory: True. Default Value: ""
The segmentation optimal method does not support streaming, and thus large images. The operation color to label is not implemented for the methods continuous LUT and support image LUT.
+ ColorMapping using support image is not threaded.
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ColorMapping-custom.html b/python/plugins/processing/otb/description/doc/ColorMapping-custom.html
new file mode 100644
index 000000000000..a57cae59fbe2
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/ColorMapping-custom.html
@@ -0,0 +1,13 @@
+
+
+
ColorMapping
Brief Description
Maps an input label image to 8-bits RGB using look-up tables.
This application allows to map a label image to a 8-bits RGB image (in both ways) using different methods.
+ -The custom method allows to use a custom look-up table. The look-up table is loaded from a text file where each line describes an entry. The typical use of this method is to colorise a classification map.
+ -The continuous method allows to map a range of values in a scalar input image to a colored image using continuous look-up table, in order to enhance image interpretation. Several look-up tables can be chosen with different color ranges.
+-The optimal method computes an optimal look-up table. When processing a segmentation label image (label to color), the color difference between adjacent segmented regions is maximized. When processing an unknown color image (color to label), all the present colors are mapped to a continuous label list.
+ - The support image method uses a color support image to associate an average color to each region.
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -op Selection of the operation to execute (default is : label to color). labeltocolor,colortolabel. Mandatory: True. Default Value: "labeltocolor"
[group] -labeltocolor
[group] -colortolabel
[param] -op.colortolabel.notfound <int32> Label to use for unknown colors.. Mandatory: False. Default Value: "404"
[choice] -method Selection of color mapping methods and their parameters. custom,continuous,optimal,image. Mandatory: True. Default Value: "custom"
[group] -custom
[param] -method.custom.lut <string> An ASCII file containing the look-up table
+with one color per line
+(for instance the line '1 255 0 0' means that all pixels with label 1 will be replaced by RGB color 255 0 0)
+Lines beginning with a # are ignored. Mandatory: True. Default Value: ""
[param] -method.continuous.min <float> Set the lower input value of the mapping range.. Mandatory: True. Default Value: "0"
[param] -method.continuous.max <float> Set the higher input value of the mapping range.. Mandatory: True. Default Value: "255"
[group] -optimal
[param] -method.optimal.background <int32> Value of the background label. Mandatory: True. Default Value: "0"
[group] -image
[param] -method.image.in <string> Support image filename. LUT is calculated using the mean of pixel value on the area. First of all image is normalized with extrema rejection. Mandatory: True. Default Value: ""
The segmentation optimal method does not support streaming, and thus large images. The operation color to label is not implemented for the methods continuous LUT and support image LUT.
+ ColorMapping using support image is not threaded.
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ColorMapping-image.html b/python/plugins/processing/otb/description/doc/ColorMapping-image.html
new file mode 100644
index 000000000000..a57cae59fbe2
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/ColorMapping-image.html
@@ -0,0 +1,13 @@
+
+
+
ColorMapping
Brief Description
Maps an input label image to 8-bits RGB using look-up tables.
This application allows to map a label image to a 8-bits RGB image (in both ways) using different methods.
+ -The custom method allows to use a custom look-up table. The look-up table is loaded from a text file where each line describes an entry. The typical use of this method is to colorise a classification map.
+ -The continuous method allows to map a range of values in a scalar input image to a colored image using continuous look-up table, in order to enhance image interpretation. Several look-up tables can be chosen with different color ranges.
+-The optimal method computes an optimal look-up table. When processing a segmentation label image (label to color), the color difference between adjacent segmented regions is maximized. When processing an unknown color image (color to label), all the present colors are mapped to a continuous label list.
+ - The support image method uses a color support image to associate an average color to each region.
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -op Selection of the operation to execute (default is : label to color). labeltocolor,colortolabel. Mandatory: True. Default Value: "labeltocolor"
[group] -labeltocolor
[group] -colortolabel
[param] -op.colortolabel.notfound <int32> Label to use for unknown colors.. Mandatory: False. Default Value: "404"
[choice] -method Selection of color mapping methods and their parameters. custom,continuous,optimal,image. Mandatory: True. Default Value: "custom"
[group] -custom
[param] -method.custom.lut <string> An ASCII file containing the look-up table
+with one color per line
+(for instance the line '1 255 0 0' means that all pixels with label 1 will be replaced by RGB color 255 0 0)
+Lines beginning with a # are ignored. Mandatory: True. Default Value: ""
[param] -method.continuous.min <float> Set the lower input value of the mapping range.. Mandatory: True. Default Value: "0"
[param] -method.continuous.max <float> Set the higher input value of the mapping range.. Mandatory: True. Default Value: "255"
[group] -optimal
[param] -method.optimal.background <int32> Value of the background label. Mandatory: True. Default Value: "0"
[group] -image
[param] -method.image.in <string> Support image filename. LUT is calculated using the mean of pixel value on the area. First of all image is normalized with extrema rejection. Mandatory: True. Default Value: ""
The segmentation optimal method does not support streaming, and thus large images. The operation color to label is not implemented for the methods continuous LUT and support image LUT.
+ ColorMapping using support image is not threaded.
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ColorMapping-optimal.html b/python/plugins/processing/otb/description/doc/ColorMapping-optimal.html
new file mode 100644
index 000000000000..a57cae59fbe2
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/ColorMapping-optimal.html
@@ -0,0 +1,13 @@
+
+
+
ColorMapping
Brief Description
Maps an input label image to 8-bits RGB using look-up tables.
This application allows to map a label image to a 8-bits RGB image (in both ways) using different methods.
+ -The custom method allows to use a custom look-up table. The look-up table is loaded from a text file where each line describes an entry. The typical use of this method is to colorise a classification map.
+ -The continuous method allows to map a range of values in a scalar input image to a colored image using continuous look-up table, in order to enhance image interpretation. Several look-up tables can be chosen with different color ranges.
+-The optimal method computes an optimal look-up table. When processing a segmentation label image (label to color), the color difference between adjacent segmented regions is maximized. When processing an unknown color image (color to label), all the present colors are mapped to a continuous label list.
+ - The support image method uses a color support image to associate an average color to each region.
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -op Selection of the operation to execute (default is : label to color). labeltocolor,colortolabel. Mandatory: True. Default Value: "labeltocolor"
[group] -labeltocolor
[group] -colortolabel
[param] -op.colortolabel.notfound <int32> Label to use for unknown colors.. Mandatory: False. Default Value: "404"
[choice] -method Selection of color mapping methods and their parameters. custom,continuous,optimal,image. Mandatory: True. Default Value: "custom"
[group] -custom
[param] -method.custom.lut <string> An ASCII file containing the look-up table
+with one color per line
+(for instance the line '1 255 0 0' means that all pixels with label 1 will be replaced by RGB color 255 0 0)
+Lines beginning with a # are ignored. Mandatory: True. Default Value: ""
[param] -method.continuous.min <float> Set the lower input value of the mapping range.. Mandatory: True. Default Value: "0"
[param] -method.continuous.max <float> Set the higher input value of the mapping range.. Mandatory: True. Default Value: "255"
[group] -optimal
[param] -method.optimal.background <int32> Value of the background label. Mandatory: True. Default Value: "0"
[group] -image
[param] -method.image.in <string> Support image filename. LUT is calculated using the mean of pixel value on the area. First of all image is normalized with extrema rejection. Mandatory: True. Default Value: ""
The segmentation optimal method does not support streaming, and thus large images. The operation color to label is not implemented for the methods continuous LUT and support image LUT.
+ ColorMapping using support image is not threaded.
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ColorMapping.html b/python/plugins/processing/otb/description/doc/ColorMapping.html
index b7b31ec421b2..a57cae59fbe2 100644
--- a/python/plugins/processing/otb/description/doc/ColorMapping.html
+++ b/python/plugins/processing/otb/description/doc/ColorMapping.html
@@ -1,7 +1,13 @@
-
Color Mapping
Brief Description
Maps an input grayscale image into 8-bits RGB using look-up tables.
Tags
Learning
Long Description
This application allows to map an input grayscale into a 8-bits RGB image using three different methods.
--The custom method allows to apply a custom look-up table to a labeled image. The look-up table is loaded from a text file where each line describes an entry. The typical use of this method is to colorise a classification map.
--The continuous method allows to map a range of values in a scalar input image into a colored image using continuous look-up table, in order to enhance image interpretation. Several look-up tables can ben chosen with different color ranges.
--The segmentation method is dedicated to segmentation labeled outputs where each segment correspond to a unique labeled. It computes an optimal look-up table such that color difference between adjacent segmented regions is maximised.
[choice] Color mapping method (-method): Selection of color mapping methods and their parameters.
[group] Color mapping with custom labeled look-up table: Apply a user-defined look-up table to a labeled image. Look-up table is loaded from a text file.
[param] Look-up table file (-lut): An ASCII file containing the look-up table
+
+
+
ColorMapping
Brief Description
Maps an input label image to 8-bits RGB using look-up tables.
This application allows to map a label image to a 8-bits RGB image (in both ways) using different methods.
+ -The custom method allows to use a custom look-up table. The look-up table is loaded from a text file where each line describes an entry. The typical use of this method is to colorise a classification map.
+ -The continuous method allows to map a range of values in a scalar input image to a colored image using continuous look-up table, in order to enhance image interpretation. Several look-up tables can be chosen with different color ranges.
+-The optimal method computes an optimal look-up table. When processing a segmentation label image (label to color), the color difference between adjacent segmented regions is maximized. When processing an unknown color image (color to label), all the present colors are mapped to a continuous label list.
+ - The support image method uses a color support image to associate an average color to each region.
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -op Selection of the operation to execute (default is : label to color). labeltocolor,colortolabel. Mandatory: True. Default Value: "labeltocolor"
[group] -labeltocolor
[group] -colortolabel
[param] -op.colortolabel.notfound <int32> Label to use for unknown colors.. Mandatory: False. Default Value: "404"
[choice] -method Selection of color mapping methods and their parameters. custom,continuous,optimal,image. Mandatory: True. Default Value: "custom"
[group] -custom
[param] -method.custom.lut <string> An ASCII file containing the look-up table
with one color per line
(for instance the line '1 255 0 0' means that all pixels with label 1 will be replaced by RGB color 255 0 0)
-Lines beginning with a # are ignored
[group] Color mapping with continuous look-up table: Apply a continuous look-up table to a range of input values.
[choice] Look-up tables (-lut): Available look-up tables.
[group] Red:
[group] Green:
[group] Blue:
[group] Grey:
[group] Hot:
[group] Cool:
[group] Spring:
[group] Summer:
[group] Autumn:
[group] Winter:
[group] Copper:
[group] Jet:
[group] HSV:
[group] OverUnder:
[group] Relief:
[param] Mapping range lower value (-min): Set the lower input value of the mapping range.
[param] Mapping range higher value (-max): Set the higher input value of the mapping range.
[group] Color mapping with a look-up table optimised for segmentation: Compute an optimal look-up table such that neighbouring labels in a segmentation are mapped to highly contrasted colors.
[param] Background label (-background): Value of the background label
Limitations
The segmentation method does not support streaming, and thus large images.
[param] -method.continuous.min <float> Set the lower input value of the mapping range.. Mandatory: True. Default Value: "0"
[param] -method.continuous.max <float> Set the higher input value of the mapping range.. Mandatory: True. Default Value: "255"
[group] -optimal
[param] -method.optimal.background <int32> Value of the background label. Mandatory: True. Default Value: "0"
[group] -image
[param] -method.image.in <string> Support image filename. LUT is calculated using the mean of pixel value on the area. First of all image is normalized with extrema rejection. Mandatory: True. Default Value: ""
The segmentation optimal method does not support streaming, and thus large images. The operation color to label is not implemented for the methods continuous LUT and support image LUT.
+ ColorMapping using support image is not threaded.
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/CompareImages.html b/python/plugins/processing/otb/description/doc/CompareImages.html
index 68a07db3a7fb..ce92b570b672 100644
--- a/python/plugins/processing/otb/description/doc/CompareImages.html
+++ b/python/plugins/processing/otb/description/doc/CompareImages.html
@@ -1 +1,5 @@
-
Images comparaison
Brief Description
Estimator between 2 images.
Tags
Statistics
Long Description
This application computes MSE (Mean Squared Error), MAE (Mean Absolute Error) and PSNR(Peak Signal to Noise Ratio) between the channel of two images (reference and measurement). The user has to set the used channel and can specified an ROI.
Parameters
[group] Reference image properties (-ref):
[param] Reference image (-in): Image used as reference in the comparaison
[param] Reference image channel (-channel): Used channel for the reference image
[group] Measured image properties (-meas):
[param] Measured image (-in): Image used as measured in the comparaison
[param] Measured image channel (-channel): Used channel for the measured image
[group] Region Of Interest (-roi):
[param] Start X (-startx): ROI start x position.
[param] Start Y (-starty): ROI start y position.
[param] Size X (-sizex): size along x in pixels.
[param] Size Y (-sizey): size along y in pixels.
[param] MSE (-mse): Mean Squared Error value
[param] MAE (-mae): Mean Absolute Error value
[param] PSNR (-psnr): Peak Signal to Noise Ratio value
This application computes MSE (Mean Squared Error), MAE (Mean Absolute Error) and PSNR (Peak Signal to Noise Ratio) between the channel of two images (reference and measurement). The user has to set the used channel and can specify a ROI.
[param] -psnr <float> Peak Signal to Noise Ratio value. Mandatory: True. Default Value: "0.0"
Limitations
None
Authors
OTB-Team
See Also
BandMath application, ImageStatistics
Example of use
ref.in: GomaApres.png
ref.channel: 1
meas.in: GomaAvant.png
meas.channel: 2
roi.startx: 20
roi.starty: 30
roi.sizex: 150
roi.sizey: 200
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ComputeConfusionMatrix-raster.html b/python/plugins/processing/otb/description/doc/ComputeConfusionMatrix-raster.html
new file mode 100644
index 000000000000..d8299fe437d4
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/ComputeConfusionMatrix-raster.html
@@ -0,0 +1,5 @@
+
+
+
ComputeConfusionMatrix
Brief Description
Computes the confusion matrix of a classification
Tags
Learning
Long Description
This application computes the confusion matrix of a classification map relatively to a ground truth. This ground truth can be given as a raster or a vector data. Only reference and produced pixels with values different from NoData are handled in the calculation of the confusion matrix. The confusion matrix is organized the following way: rows = reference labels, columns = produced labels. In the header of the output file, the reference and produced class labels are ordered according to the rows/columns of the confusion matrix.
[param] -out <string> Filename to store the output matrix (csv format). Mandatory: True. Default Value: ""
[param] -nodatalabel <int32> Label for the NoData class. Such input pixels will be discarded from the ground truth and from the input classification map. By default, 'nodatalabel = 0'.. Mandatory: False. Default Value: "0"
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -ref Choice of ground truth format raster,vector. Mandatory: True. Default Value: "raster"
[group] -raster
[param] -ref.raster.in <string> Input image containing the ground truth labels. Mandatory: True. Default Value: ""
[group] -vector
[param] -ref.vector.in <string> Input vector data of the ground truth. Mandatory: True. Default Value: ""
[param] -ref.vector.field <string> Field name containing the label values. Mandatory: False. Default Value: "Class"
Limitations
None
Authors
OTB-Team
See Also
Example of use
in: clLabeledImageQB1.tif
out: ConfusionMatrix.csv
ref: vector
ref.vector.in: VectorData_QB1_bis.shp
ref.vector.field: Class
nodatalabel: 255
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ComputeConfusionMatrix-vector.html b/python/plugins/processing/otb/description/doc/ComputeConfusionMatrix-vector.html
new file mode 100644
index 000000000000..d8299fe437d4
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/ComputeConfusionMatrix-vector.html
@@ -0,0 +1,5 @@
+
+
+
ComputeConfusionMatrix
Brief Description
Computes the confusion matrix of a classification
Tags
Learning
Long Description
This application computes the confusion matrix of a classification map relatively to a ground truth. This ground truth can be given as a raster or a vector data. Only reference and produced pixels with values different from NoData are handled in the calculation of the confusion matrix. The confusion matrix is organized the following way: rows = reference labels, columns = produced labels. In the header of the output file, the reference and produced class labels are ordered according to the rows/columns of the confusion matrix.
[param] -out <string> Filename to store the output matrix (csv format). Mandatory: True. Default Value: ""
[param] -nodatalabel <int32> Label for the NoData class. Such input pixels will be discarded from the ground truth and from the input classification map. By default, 'nodatalabel = 0'.. Mandatory: False. Default Value: "0"
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -ref Choice of ground truth format raster,vector. Mandatory: True. Default Value: "raster"
[group] -raster
[param] -ref.raster.in <string> Input image containing the ground truth labels. Mandatory: True. Default Value: ""
[group] -vector
[param] -ref.vector.in <string> Input vector data of the ground truth. Mandatory: True. Default Value: ""
[param] -ref.vector.field <string> Field name containing the label values. Mandatory: False. Default Value: "Class"
Limitations
None
Authors
OTB-Team
See Also
Example of use
in: clLabeledImageQB1.tif
out: ConfusionMatrix.csv
ref: vector
ref.vector.in: VectorData_QB1_bis.shp
ref.vector.field: Class
nodatalabel: 255
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ComputeConfusionMatrix.html b/python/plugins/processing/otb/description/doc/ComputeConfusionMatrix.html
new file mode 100644
index 000000000000..d8299fe437d4
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/ComputeConfusionMatrix.html
@@ -0,0 +1,5 @@
+
+
+
ComputeConfusionMatrix
Brief Description
Computes the confusion matrix of a classification
Tags
Learning
Long Description
This application computes the confusion matrix of a classification map relatively to a ground truth. This ground truth can be given as a raster or a vector data. Only reference and produced pixels with values different from NoData are handled in the calculation of the confusion matrix. The confusion matrix is organized the following way: rows = reference labels, columns = produced labels. In the header of the output file, the reference and produced class labels are ordered according to the rows/columns of the confusion matrix.
[param] -out <string> Filename to store the output matrix (csv format). Mandatory: True. Default Value: ""
[param] -nodatalabel <int32> Label for the NoData class. Such input pixels will be discarded from the ground truth and from the input classification map. By default, 'nodatalabel = 0'.. Mandatory: False. Default Value: "0"
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -ref Choice of ground truth format raster,vector. Mandatory: True. Default Value: "raster"
[group] -raster
[param] -ref.raster.in <string> Input image containing the ground truth labels. Mandatory: True. Default Value: ""
[group] -vector
[param] -ref.vector.in <string> Input vector data of the ground truth. Mandatory: True. Default Value: ""
[param] -ref.vector.field <string> Field name containing the label values. Mandatory: False. Default Value: "Class"
Limitations
None
Authors
OTB-Team
See Also
Example of use
in: clLabeledImageQB1.tif
out: ConfusionMatrix.csv
ref: vector
ref.vector.in: VectorData_QB1_bis.shp
ref.vector.field: Class
nodatalabel: 255
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ComputeImagesStatistics.html b/python/plugins/processing/otb/description/doc/ComputeImagesStatistics.html
index 34e957f0a31a..4cc40bfc5a71 100644
--- a/python/plugins/processing/otb/description/doc/ComputeImagesStatistics.html
+++ b/python/plugins/processing/otb/description/doc/ComputeImagesStatistics.html
@@ -1 +1,5 @@
-
Compute Images second order statistics
Brief Description
Computes global mean and standard deviation for each band from a set of images and optionally saves the results in an XML file.
Tags
Learning, Image Analysis
Long Description
This application computes a global mean and standard deviation for each band of a set of images and optionally saves the results in an XML file. The output XML is intended to be used an input for the TrainImagesSVMClassifier application to normalize samples before learning.
Parameters
[param] Input images (-il): List of input images filenames.
[param] Output XML file (-out): XML filename where the statistics are saved for future reuse
Limitations
The set of input images must have the same number of bands. Input images must be of the same number, type and order of bands.
Authors
OTB-Team
See also
Documentation of the TrainImagesSVMClassifier application.
Computes global mean and standard deviation for each band from a set of images and optionally saves the results in an XML file.
Tags
Learning,Image Analysis
Long Description
This application computes a global mean and standard deviation for each band of a set of images and optionally saves the results in an XML file. The output XML is intended to be used as an input for the TrainImagesClassifier application to normalize samples before learning.
Parameters
[param] -il <string> List of input images filenames.. Mandatory: True. Default Value: "0"
[param] -bv <float> Background value to ignore in statistics computation.. Mandatory: False. Default Value: "0.0"
[param] -out <string> XML filename where the statistics are saved for future reuse.. Mandatory: False. Default Value: ""
Limitations
Each image of the set must contain the same bands as the others (i.e. same types, in the same order).
Authors
OTB-Team
See Also
Documentation of the TrainImagesClassifier application.
Example of use
il: QB_1_ortho.tif
out: EstimateImageStatisticsQB1.xml
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ComputeModulusAndPhase-OneEntry.html b/python/plugins/processing/otb/description/doc/ComputeModulusAndPhase-OneEntry.html
new file mode 100644
index 000000000000..bb861bd888a5
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/ComputeModulusAndPhase-OneEntry.html
@@ -0,0 +1,5 @@
+
+
+
ComputeModulusAndPhase
Brief Description
Read one or two files and compute the modulus and the phase
Tags
SAR
Long Description
This application computes the modulus and the phase of a complex SAR data. This complex SAR data could be provided as a monoband complex pixel type image or a 2 bands real pixel type image or two monobands (first one real part and second one imaginary part) real pixel type images.
Parameters
[param] -mod <string> Modulus of the input: sqrt(real*real + imag*imag).. Mandatory: True. Default Value: ""
[param] -pha <string> Phase of the input: atan2(imag, real).. Mandatory: True. Default Value: ""
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -nbinput Choice about the number of input files used to store the real and imaginary part of the SAR image one,two. Mandatory: True. Default Value: "one"
[group] -one
[param] -nbinput.one.in <string> Image file with SAR data.. Mandatory: True. Default Value: ""
[group] -two
[param] -nbinput.two.re <string> Image file with real part of the SAR data.. Mandatory: False. Default Value: ""
[param] -nbinput.two.im <string> Image file with imaginary part of the SAR data.. Mandatory: False. Default Value: ""
Limitations
None
Authors
Alexia Mondot (alexia.mondot@c-s.fr) and Mickael Savinaud (mickael.savinaud@c-s.fr)
See Also
Example of use
nbinput.one.in: monobandComplexFloat.tif
mod: modulus.tif
pha: phase.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ComputeModulusAndPhase-TwoEntries.html b/python/plugins/processing/otb/description/doc/ComputeModulusAndPhase-TwoEntries.html
new file mode 100644
index 000000000000..bb861bd888a5
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/ComputeModulusAndPhase-TwoEntries.html
@@ -0,0 +1,5 @@
+
+
+
ComputeModulusAndPhase
Brief Description
Read one or two files and compute the modulus and the phase
Tags
SAR
Long Description
This application computes the modulus and the phase of a complex SAR data. This complex SAR data could be provided as a monoband complex pixel type image or a 2 bands real pixel type image or two monobands (first one real part and second one imaginary part) real pixel type images.
Parameters
[param] -mod <string> Modulus of the input: sqrt(real*real + imag*imag).. Mandatory: True. Default Value: ""
[param] -pha <string> Phase of the input: atan2(imag, real).. Mandatory: True. Default Value: ""
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -nbinput Choice about the number of input files used to store the real and imaginary part of the SAR image one,two. Mandatory: True. Default Value: "one"
[group] -one
[param] -nbinput.one.in <string> Image file with SAR data.. Mandatory: True. Default Value: ""
[group] -two
[param] -nbinput.two.re <string> Image file with real part of the SAR data.. Mandatory: False. Default Value: ""
[param] -nbinput.two.im <string> Image file with imaginary part of the SAR data.. Mandatory: False. Default Value: ""
Limitations
None
Authors
Alexia Mondot (alexia.mondot@c-s.fr) and Mickael Savinaud (mickael.savinaud@c-s.fr)
See Also
Example of use
nbinput.one.in: monobandComplexFloat.tif
mod: modulus.tif
pha: phase.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ComputeModulusAndPhase.html b/python/plugins/processing/otb/description/doc/ComputeModulusAndPhase.html
new file mode 100644
index 000000000000..bb861bd888a5
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/ComputeModulusAndPhase.html
@@ -0,0 +1,5 @@
+
+
+
ComputeModulusAndPhase
Brief Description
Read one or two files and compute the module and the phase
Tags
SAR
Long Description
This application computes the modulus and the phase of a complex SAR data. This complex SAR data could be provided as a monoband complex pixel type image or a 2 bands real pixel type image or two monobands (first one real part and second one imaginary part) real pixel type images.
Parameters
[param] -mod <string> Modulus of the input: sqrt(real*real + imag*imag).. Mandatory: True. Default Value: ""
[param] -pha <string> Phase of the input: atan2(imag, real).. Mandatory: True. Default Value: ""
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -nbinput Choice about the number of input files used to store the real and imaginary part of the SAR image one,two. Mandatory: True. Default Value: "one"
[group] -one
[param] -nbinput.one.in <string> Image file with SAR data.. Mandatory: True. Default Value: ""
[group] -two
[param] -nbinput.two.re <string> Image file with real part of the SAR data.. Mandatory: False. Default Value: ""
[param] -nbinput.two.im <string> Image file with imaginary part of the SAR data.. Mandatory: False. Default Value: ""
Limitations
None
Authors
Alexia Mondot (alexia.mondot@c-s.fr) and Mickael Savinaud (mickael.savinaud@c-s.fr)
See Also
Example of use
nbinput.one.in: monobandComplexFloat.tif
mod: modulus.tif
pha: phase.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ComputePolylineFeatureFromImage.html b/python/plugins/processing/otb/description/doc/ComputePolylineFeatureFromImage.html
index 5ad92c630264..e256c147602d 100644
--- a/python/plugins/processing/otb/description/doc/ComputePolylineFeatureFromImage.html
+++ b/python/plugins/processing/otb/description/doc/ComputePolylineFeatureFromImage.html
@@ -1 +1,5 @@
-
Compute Polyline Feature From Image Application
Brief Description
Compute a polyline feature descriptors from an input image which are part of the polyline pixels that verify the FeatureExpression
Tags
Feature Extraction
Long Description
This application computes a polyline feature descriptors from an input image which are part of the polyline pixels that verify the FeatureExpression.
Parameters
[param] Input Image (-in): An image from which to compute description.
[param] Vector Data (-vd): Vector data containing the polylines onto which the feature will be computed.
[param] DEM repository (-dem): path to SRTM repository
[param] Feature expression (-expr): The feature formula (b1 > 0.3)
[param] Feature name (-field): The feature name (NONDVI, ROADSA...)
[param] Output Vector Data (-out): The output vector data containing the features
Limitations
Since it does not rely on streaming process, take care of the size of input image before launching application.
This application computes, for each studied polyline contained in the input VectorData, the chosen descriptors.
Tags
Feature Extraction
Long Description
The first step in the classifier fusion based validation is to compute, for each studied polyline, the chosen descriptors.
Parameters
[param] -in <string> An image to compute the descriptors on.. Mandatory: True. Default Value: ""
[param] -vd <string> Vector data containing the polylines where the features will be computed.. Mandatory: True. Default Value: ""
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -expr <string> The feature formula (b1 < 0.3) where b1 is the standard name of input image first band. Mandatory: True. Default Value: ""
[param] -field <string> The field name corresponding to the feature codename (NONDVI, ROADSA...). Mandatory: True. Default Value: ""
[param] -out <string> The output vector data containing polylines with a new field. Mandatory: True. Default Value: ""
Limitations
Since it does not rely on streaming process, take care of the size of input image before launching application.
Authors
OTB-Team
See Also
Example of use
in: NDVI.TIF
vd: roads_ground_truth.shp
expr: "(b1 > 0.4)"
field: NONDVI
out: PolylineFeatureFromImage_LI_NONDVI_gt.shp
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ConcatenateImages.html b/python/plugins/processing/otb/description/doc/ConcatenateImages.html
index e7341f47f0dc..76caf5d7e76d 100644
--- a/python/plugins/processing/otb/description/doc/ConcatenateImages.html
+++ b/python/plugins/processing/otb/description/doc/ConcatenateImages.html
@@ -1 +1,5 @@
-
Images Concatenation Application
Brief Description
Concatenate a list of images into a single multi channel one.
Tags
Image Manipulation, Concatenation, Multi channel
Long Description
This application performs images concatenation. It will take the input image list (mono or multi channel) and generate a single multi channel image. The channel order is the one of the list.
Parameters
[param] Input image list (-il): Image list to concatenate
Concatenate a list of images of the same size into a single multi-channel one.
Tags
Image Manipulation,Concatenation,Multi-channel
Long Description
This application performs images channels concatenation. It will walk the input image list (single or multi-channel) and generates a single multi-channel image. The channel order is the one of the list.
Parameters
[param] -il <string> The list of images to concatenate. Mandatory: True. Default Value: "0"
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
Limitations
All input images must have the same size.
Authors
OTB-Team
See Also
Rescale application, Convert
Example of use
il: GomaAvant.png GomaApres.png
out: otbConcatenateImages.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ConcatenateVectorData.html b/python/plugins/processing/otb/description/doc/ConcatenateVectorData.html
index 5acbac5451ea..8ff5e5632c0c 100644
--- a/python/plugins/processing/otb/description/doc/ConcatenateVectorData.html
+++ b/python/plugins/processing/otb/description/doc/ConcatenateVectorData.html
@@ -1 +1,5 @@
-
Concatenate Application
Brief Description
Concatenate VectorDatas
Tags
Vector data Manipulation
Long Description
This application concatenates a list of VectorData to produce a unique VectorData as output. Note that the VectorDatas must be of the same type (Storing polygons only, lines only, or points only)
Parameters
[param] Input VectorDatas to concatenate (-vd): VectorData files to be concatenated in an unique VectorData
[param] Concatenated VectorData (-out):
Limitations
None
Authors
OTB-Team
See also
Example of use
Parameters to set value:
Input VectorDatas to concatenate: waterways.shp france_coastline.shp
This application concatenates a list of VectorData to produce a unique VectorData as output. Note that the VectorDatas must be of the same type (Storing polygons only, lines only, or points only)
Parameters
[param] -vd <string> VectorData files to be concatenated in an unique VectorData. Mandatory: True. Default Value: "0"
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ConnectedComponentSegmentation.html b/python/plugins/processing/otb/description/doc/ConnectedComponentSegmentation.html
index c992f81aa89a..de09bcd75865 100644
--- a/python/plugins/processing/otb/description/doc/ConnectedComponentSegmentation.html
+++ b/python/plugins/processing/otb/description/doc/ConnectedComponentSegmentation.html
@@ -1 +1,5 @@
-
Connected Component Segmentation Application
Brief Description
Compute a connected component segmentation.
Tags
Image Analysis, Segmentation
Long Description
Compute a connected component segmentation, which takes mathematical formula as a neighborhood thresholding criteria.
Parameters
[param] Input Image (-in): The image to segment.
[param] Output Shape (-out): The segmentation shape.
[param] Mask expression (-mask): Mask mathematical expression (only if support image is given)
[param] Connected Component Expression (-expr): Formula used for connected component segmentation
[param] Minimum Object Size (-minsize): Min object size (area in pixel)
Connected component segmentation and object based image filtering of the input image according to user-defined criterions.
Tags
Image Analysis,Segmentation
Long Description
This application allows to perform a masking, connected components segmentation and object based image filtering. First and optionally, a mask can be built based on user-defined criterions to select pixels of the image which will be segmented. Then a connected component segmentation is performed with a user defined criterion to decide whether two neighbouring pixels belong to the same segment or not. After this segmentation step, an object based image filtering is applied using another user-defined criterion reasoning on segment properties, like shape or radiometric attributes. Criterions are mathematical expressions analysed by the MuParser library (http://muparser.sourceforge.net/). For instance, expression "((b1>80) and intensity>95)" will merge two neighbouring pixel in a single segment if their intensity is more than 95 and their value in the first image band is more than 80. See parameters documentation for a list of available attributes. The output of the object based image filtering is vectorized and can be written in shapefile or KML format. If the input image is in raw geometry, resulting polygons will be transformed to WGS84 using sensor modelling before writing, to ensure consistency with GIS softwares. For this purpose, a Digital Elevation Model can be provided to the application. The whole processing is done on a per-tile basis for large images, so this application can handle images of arbitrary size.
Parameters
[param] -in <string> The image to segment.. Mandatory: True. Default Value: ""
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
Limitations
Due to the tiling scheme in case of large images, some segments can be arbitrarily split across multiple tiles.
Authors
OTB-Team
See Also
Example of use
in: ROI_QB_MUL_4.tif
mask: "((b1>80)*intensity>95)"
expr: "distance<10"
minsize: 15
obia: "SHAPE_Elongation>8"
out: ConnectedComponentSegmentation.shp
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/Convert.html b/python/plugins/processing/otb/description/doc/Convert.html
index c584afd15e1a..cab60f0c3f7c 100644
--- a/python/plugins/processing/otb/description/doc/Convert.html
+++ b/python/plugins/processing/otb/description/doc/Convert.html
@@ -1,2 +1,6 @@
-
Image Conversion Application
Brief Description
Convert an image to a different format, eventually rescaling the data and/or changing the pixel type.
Tags
Conversion, Image Dynamic
Long Description
This application performs an image pixel type conversion (short, ushort, char, uchar, int, uint, float and double types are handled). The output image is written in the specified format (ie. that corresponds to the given extension).
- The convertion can include a rescale usiong the image 2% minimum and maximum values. The rescale can be linear or log2.
Parameters
[param] Input image (-in): Input image
[choice] Rescale type (-type): Transfer function for the rescaling
otbcli_Convert -in QB_Toulouse_Ortho_XS.tif -type linear -out otbConvertWithScalingOutput.png uchar
\ No newline at end of file
+
+
+
Convert
Brief Description
Convert an image to a different format, eventually rescaling the data and/or changing the pixel type.
Tags
Conversion,Image Dynamic,Image Manipulation
Long Description
This application performs an image pixel type conversion (short, ushort, uchar, int, uint, float and double types are handled). The output image is written in the specified format (ie. that corresponds to the given extension).
+ The conversion can include a rescale using the image 2 percent minimum and maximum values. The rescale can be linear or log2.
[param] -mask <string> The masked pixels won't be used to adapt the dynamic (the mask must have the same dimensions as the input image). Mandatory: False. Default Value: ""
[param] -hcp <string> Parameters to cut the histogram edges before rescaling. Mandatory: True. Default Value: "0"
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -type Transfer function for the rescaling none,linear,log2. Mandatory: True. Default Value: "none"
[group] -none
[group] -linear
[group] -log2
Limitations
None
Authors
OTB-Team
See Also
Rescale
Example of use
in: QB_Toulouse_Ortho_XS.tif
out: otbConvertWithScalingOutput.png uchar
type: linear
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ConvertCartoToGeoPoint.html b/python/plugins/processing/otb/description/doc/ConvertCartoToGeoPoint.html
index a6f7223058ff..a677627d74e1 100644
--- a/python/plugins/processing/otb/description/doc/ConvertCartoToGeoPoint.html
+++ b/python/plugins/processing/otb/description/doc/ConvertCartoToGeoPoint.html
@@ -1 +1,5 @@
-
Cartographic to geographic coordinates conversion
Brief Description
Convert cartographic coordinates to geographic one.
Tags
Coordinates, Geometry
Long Description
This application computes the geographic coordinates from a cartographic one. user has to give the X and Y coordinate and the cartographic projection (UTM/LAMBERT/LAMBERT2/LAMBERT93/SINUS/ECKERT4/TRANSMERCATOR/MOLLWEID/SVY21).
Parameters
[group] Input cartographic coordinates (-carto):
[param] X cartographic coordinates (-x): X cartographic coordinates in the specified projection.
[param] Y cartographic coordinates (-y): Y cartographic coordinates in the specified projection.
[choice] Map projection type (-mapproj): Type of projection used for the conversion. Possible values are: utm, lambert, lambert2, lambert93, sinus, eckert4, transmercator, mollweid and svy21.
[group] utm:
[param] zone (-zone): UTM zone of the point.
[param] Is in north hemisphere (-hemisphere): Is the point is in the north hemisphere or not.
[group] lambert:
[param] First parallele degree (-parallel1degree): First Lambert parallele degree.
[param] Second parallele degree (-parallel2degree): Second Lambert parallele degree.
Convert cartographic coordinates to geographic one.
Tags
Coordinates,Geometry
Long Description
This application computes the geographic coordinates from a cartographic one. User has to give the X and Y coordinate and the cartographic projection (UTM/LAMBERT/LAMBERT2/LAMBERT93/SINUS/ECKERT4/TRANSMERCATOR/MOLLWEID/SVY21).
[param] -lat <float> Point latitude coordinates.. Mandatory: True. Default Value: "0.0"
[choice] -mapproj Parameters of the output map projection to be used. utm,lambert2,lambert93,wgs,epsg. Mandatory: True. Default Value: "utm"
[group] -utm
[param] -mapproj.utm.zone <int32> The zone number ranges from 1 to 60 and allows to define the transverse mercator projection (along with the hemisphere). Mandatory: True. Default Value: "31"
[param] -mapproj.utm.northhem <boolean> The transverse mercator projections are defined by their zone number as well as the hemisphere. Activate this parameter if your image is in the northern hemisphere.. Mandatory: False. Default Value: "True"
[group] -lambert2
[group] -lambert93
[group] -wgs
[group] -epsg
[param] -mapproj.epsg.code <int32> See www.spatialreference.org to find which EPSG code is associated to your projection. Mandatory: True. Default Value: "4326"
Limitations
None
Authors
OTB-Team
See Also
Example of use
carto.x: 367074.625
carto.y: 4835740
mapproj: utm
mapproj.utm.northhem: true
mapproj.utm.zone: 31
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ConvertSensorToGeoPoint.html b/python/plugins/processing/otb/description/doc/ConvertSensorToGeoPoint.html
new file mode 100644
index 000000000000..f80ec6b3e0b3
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/ConvertSensorToGeoPoint.html
@@ -0,0 +1,5 @@
+
+
+
ConvertSensorToGeoPoint
Brief Description
Sensor to geographic coordinates conversion.
Tags
Geometry
Long Description
This Application converts a sensor point of an input image to a geographic point using the Forward Sensor Model of the input image.
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/DSFuzzyModelEstimation.html b/python/plugins/processing/otb/description/doc/DSFuzzyModelEstimation.html
index ed997ac4b972..2f4790730f12 100644
--- a/python/plugins/processing/otb/description/doc/DSFuzzyModelEstimation.html
+++ b/python/plugins/processing/otb/description/doc/DSFuzzyModelEstimation.html
@@ -1 +1,5 @@
-
Fuzzy Model estimation Application
Brief Description
Estimate feature fuzzy model parameters using 2 vector data (ground truth samples and wrong samples).
Tags
Feature Extraction
Long Description
Estimate feature fuzzy model parameters using 2 vector data (ground truth samples and wrong samples).
Parameters
[param] Input Positive Vector Data (-psin): Ground truth vector data for positive samples
[param] Input Negative Vector Data (-nsin): Ground truth vector data for negative samples
[param] Belief Support (-belsup): Dempster Shafer study hypothesis to compute belief
[param] Plausibility Support (-plasup): Dempster Shafer study hypothesis to compute plausibility
[param] -wgt <float> Coefficient between 0 and 1 to promote undetection or false detections (default 0.5). Mandatory: False. Default Value: "0.5"
[param] -initmod <string> Initialization model (xml file) to be used. If the xml initialization model is set, the descriptor list is not used (specified using the option -desclist). Mandatory: False. Default Value: ""
[param] -desclist <string> List of the descriptors to be used in the model (must be specified to perform an automatic initialization). Mandatory: False. Default Value: ""
[param] -maxnbit <int32> Maximum number of optimizer iteration (default 200). Mandatory: False. Default Value: "200"
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/DimensionalityReduction-ica.html b/python/plugins/processing/otb/description/doc/DimensionalityReduction-ica.html
new file mode 100644
index 000000000000..1a624e336723
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/DimensionalityReduction-ica.html
@@ -0,0 +1,5 @@
+
+
+
DimensionalityReduction
Brief Description
Perform Dimension reduction of the input image.
Tags
Dimensionality Reduction,Image Filtering
Long Description
Performs dimensionality reduction on input image. PCA,NA-PCA,MAF,ICA methods are available.
Parameters
[param] -in <string> The input image to apply dimensionality reduction.. Mandatory: True. Default Value: ""
[param] -out <string> output image. Components are ordered by decreasing eigenvalues.. Mandatory: True. Default Value: ""
Though the inverse transform can be computed, this application only provides the forward transform for now.
Authors
OTB-Team
See Also
"Kernel maximum autocorrelation factor and minimum noise fraction transformations," IEEE Transactions on Image Processing, vol. 20, no. 3, pp. 612-624, (2011)
Example of use
in: cupriteSubHsi.tif
out: FilterOutput.tif
method: pca
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/DimensionalityReduction-maf.html b/python/plugins/processing/otb/description/doc/DimensionalityReduction-maf.html
new file mode 100644
index 000000000000..1a624e336723
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/DimensionalityReduction-maf.html
@@ -0,0 +1,5 @@
+
+
+
DimensionalityReduction
Brief Description
Perform Dimension reduction of the input image.
Tags
Dimensionality Reduction,Image Filtering
Long Description
Performs dimensionality reduction on input image. PCA,NA-PCA,MAF,ICA methods are available.
Parameters
[param] -in <string> The input image to apply dimensionality reduction.. Mandatory: True. Default Value: ""
[param] -out <string> output image. Components are ordered by decreasing eigenvalues.. Mandatory: True. Default Value: ""
Though the inverse transform can be computed, this application only provides the forward transform for now.
Authors
OTB-Team
See Also
"Kernel maximum autocorrelation factor and minimum noise fraction transformations," IEEE Transactions on Image Processing, vol. 20, no. 3, pp. 612-624, (2011)
Example of use
in: cupriteSubHsi.tif
out: FilterOutput.tif
method: pca
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/DimensionalityReduction-napca.html b/python/plugins/processing/otb/description/doc/DimensionalityReduction-napca.html
new file mode 100644
index 000000000000..1a624e336723
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/DimensionalityReduction-napca.html
@@ -0,0 +1,5 @@
+
+
+
DimensionalityReduction
Brief Description
Perform Dimension reduction of the input image.
Tags
Dimensionality Reduction,Image Filtering
Long Description
Performs dimensionality reduction on input image. PCA,NA-PCA,MAF,ICA methods are available.
Parameters
[param] -in <string> The input image to apply dimensionality reduction.. Mandatory: True. Default Value: ""
[param] -out <string> output image. Components are ordered by decreasing eigenvalues.. Mandatory: True. Default Value: ""
Though the inverse transform can be computed, this application only provides the forward transform for now.
Authors
OTB-Team
See Also
"Kernel maximum autocorrelation factor and minimum noise fraction transformations," IEEE Transactions on Image Processing, vol. 20, no. 3, pp. 612-624, (2011)
Example of use
in: cupriteSubHsi.tif
out: FilterOutput.tif
method: pca
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/DimensionalityReduction-pca.html b/python/plugins/processing/otb/description/doc/DimensionalityReduction-pca.html
new file mode 100644
index 000000000000..1a624e336723
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/DimensionalityReduction-pca.html
@@ -0,0 +1,5 @@
+
+
+
DimensionalityReduction
Brief Description
Perform Dimension reduction of the input image.
Tags
Dimensionality Reduction,Image Filtering
Long Description
Performs dimensionality reduction on input image. PCA,NA-PCA,MAF,ICA methods are available.
Parameters
[param] -in <string> The input image to apply dimensionality reduction.. Mandatory: True. Default Value: ""
[param] -out <string> output image. Components are ordered by decreasing eigenvalues.. Mandatory: True. Default Value: ""
Performs dimensionality reduction on input image. PCA,NA-PCA,MAF,ICA methods are available.
-
Parameters
-
This section describes in details the parameters available for this application. Table 4.27, page 387
-presents a summary of these parameters and the parameters keys to be used in command-line and
-programming languages. Application key is DimensionalityReduction.
-
-
-
-
-
-
-
-
Parameter key
Parameter type
Parameter description
-
in
Input image
Input Image
-
out
Output image
Output Image
-
rescale
Group
Rescale Output.
-
rescale.outmin
Float
Output min value
-
rescale.outmax
Float
Output max value
-
outinv
Output image
Inverse Output Image
-
method
Choices
Algorithm
-
method pca
Choice
PCA
-
method napca
Choice
NA-PCA
-
method maf
Choice
MAF
-
method ica
Choice
ICA
-
method.napca.radiusx
Int
Set the x radius of the sliding window.
-
method.napca.radiusy
Int
Set the y radius of the sliding window.
-
method.ica.iter
Int
number of iterations
-
method.ica.mu
Float
Give the increment weight of W in [0, 1]
-
nbcomp
Int
Number of Components.
-
normalize
Boolean
Normalize.
-
-
-
Table 4.27: Parameters table for Dimensionality reduction application.
-
-
-
-
-
Input Image
-The input image to apply dimensionality reduction.
-
Output Image
-output image. Components are ordered by decreasing eigenvalues.
-
Rescale Output.
-
-
Output min value: Minimum value of the output image.
-
-
Output max value: Maximum value of the output image.
-
-
Inverse Output Image
-reconstruct output image.
-
Algorithm
-Selection of the reduction dimension method. Available choices are:
-
-
PCA: Principal Component Analysis.
-
-
NA-PCA: Noise Adjusted Principal Component Analysis.
-
-
Set the x radius of the sliding window.:
-
-
Set the y radius of the sliding window.:
-
-
MAF: Maximum Autocorrelation Factor.
-
-
-
-
ICA: Independent Component Analysis.
-
-
number of iterations :
-
-
Give the increment weight of W in [0, 1]:
-
-
Number of Components.
-Number of relevant components kept. By default all components are kept.
-
Normalize.
-center AND reduce data before Dimensionality reduction.
-
-
Example
-
To run this example in command-line, use the following:
-
Projects a disparity map into a regular elevation map
-
Detailed description
-
This application uses a disparity map computed from a stereo image pair to produce an elevation map on the
-ground area covered by the stereo pair. The needed inputs are : the disparity map, the stereo pair (in original
-geometry) and the epipolar deformation grids. These grids have to link the original geometry (stereo pair)
-and the epipolar geometry (disparity map).
-
Parameters
-
This section describes in details the parameters available for this application. Table 4.39, page 454
-presents a summary of these parameters and the parameters keys to be used in command-line and
-programming languages. Application key is DisparityMapToElevationMap.
-
-
-
-
-
-
-
-
Parameter key
Parameter type
Parameter description
-
io
Group
Input and output data
-
io.in
Input image
Input disparity map
-
io.left
Input image
Left sensor image
-
io.right
Input image
Right sensor image
-
io.lgrid
Input image
Left Grid
-
io.rgrid
Input image
Right Grid
-
io.out
Output image
Output elevation map
-
io.mask
Input image
Disparity mask
-
step
Float
DEM step
-
hmin
Float
Minimum elevation expected
-
hmax
Float
Maximum elevation expected
-
elev
Choices
Elevation management
-
elev dem
Choice
DEM directory
-
elev average
Choice
Average Elevation
-
elev.dem.path
Directory
DEM directory
-
elev.dem.geoid
Input File name
Geoid File
-
elev.average.value
Float
Average Elevation
-
ram
Int
Available RAM (Mb)
-
-
-
Table 4.39: Parameters table for Disparity map to elevation map.
-
-
-
-
-
Input and output data
-This group of parameters allows to set the input and output images and grids.
-
-
Input disparity map: The input disparity map (horizontal disparity in first band, vertical in
- second)
-
-
Left sensor image: Left image in original (sensor) geometry
-
-
Right sensor image: Right image in original (sensor) geometry
-
-
Left Grid: Left epipolar grid (deformation grid between sensor et disparity spaces)
-
-
Right Grid: Right epipolar grid (deformation grid between sensor et disparity spaces)
-
-
Output elevation map: Output elevation map in ground projection
-
-
Disparity mask: Masked disparity cells won’t be projected
-
-
DEM step
-Spacing of the output elevation map (in meters)
-
Minimum elevation expected
-Minimum elevation expected (in meters)
-
Maximum elevation expected
-Maximum elevation expected (in meters)
-
-
-
Elevation management
-This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any
-geotiff processed by the DEM import application Available choices are:
-
-
DEM directory
-
-
DEM directory: This parameter allows to select a directory containing Digital
- Elevation Model tiles
-
-
Geoid File: Use a geoid grid to get the height above the ellipsoid used
-
-
Average Elevation
-
-
Average Elevation: This parameter allows to pick up an average elevation for all the
- points of the image.
-
-
Available RAM (Mb)
-Available memory for processing (in MB)
-
-
Example
-
To run this example in command-line, use the following:
-
Projects a disparity map into a regular elevation map
Tags
Stereo
Long Description
This application uses a disparity map computed from a stereo image pair to produce an elevation map on the ground area covered by the stereo pair. The needed inputs are : the disparity map, the stereo pair (in original geometry) and the epipolar deformation grids. These grids have to link the original geometry (stereo pair) and the epipolar geometry (disparity map).
Parameters
[param] -io <string> This group of parameters allows to set the input and output images and grids.. Mandatory: True. Default Value: "0"
[param] -step <float> Spacing of the output elevation map (in meters). Mandatory: True. Default Value: "5"
[param] -hmax <float> Maximum elevation expected (in meters). Mandatory: True. Default Value: "100"
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/DownloadSRTMTiles.html b/python/plugins/processing/otb/description/doc/DownloadSRTMTiles.html
new file mode 100644
index 000000000000..f3982026371a
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/DownloadSRTMTiles.html
@@ -0,0 +1,5 @@
+
+
+
DownloadSRTMTiles
Brief Description
Download or list SRTM tiles related to a set of images
Tags
Utilities,Image Manipulation
Long Description
This application allows to select the appropriate SRTM tiles that covers a list of images. It builds a list of the required tiles. Two modes are available: the first one downloads those tiles from the USGS SRTM3 website (http://dds.cr.usgs.gov/srtm/version2_1/SRTM3/), the second one lists those tiles in a local directory. In both cases, you need to indicate the directory in which tiles will be downloaded or the location of local SRTM files.
Parameters
[param] -il <string> The list of images on which you want to determine corresponding SRTM tiles.. Mandatory: True. Default Value: "0"
[param] -mode.download.outdir <string> Directory where zipped tiles will be saved. You'll need to unzip all tile files before using them in your application.. Mandatory: True. Default Value: ""
[group] -list
[param] -mode.list.indir <string> Input directory where SRTM tiles are located.. Mandatory: True. Default Value: ""
Limitations
None
Authors
OTB-Team
See Also
Example of use
il: QB_Toulouse_Ortho_XS.tif
mode: list
mode.list.indir: /home/user/srtm_dir/
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/EdgeExtraction-gradient.html b/python/plugins/processing/otb/description/doc/EdgeExtraction-gradient.html
new file mode 100644
index 000000000000..db327d661980
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/EdgeExtraction-gradient.html
@@ -0,0 +1,5 @@
+
+
+
EdgeExtraction
Brief Description
Computes edge features on every pixel of the input image selected channel
Tags
Edge,Feature Extraction
Long Description
This application computes edge features on a mono band image
Parameters
[param] -in <string> The input image to compute the features on.. Mandatory: True. Default Value: ""
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/EdgeExtraction-sobel.html b/python/plugins/processing/otb/description/doc/EdgeExtraction-sobel.html
new file mode 100644
index 000000000000..db327d661980
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/EdgeExtraction-sobel.html
@@ -0,0 +1,5 @@
+
+
+
EdgeExtraction
Brief Description
Computes edge features on every pixel of the input image selected channel
Tags
Edge,Feature Extraction
Long Description
This application computes edge features on a mono band image
Parameters
[param] -in <string> The input image to compute the features on.. Mandatory: True. Default Value: ""
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/EdgeExtraction-touzi.html b/python/plugins/processing/otb/description/doc/EdgeExtraction-touzi.html
new file mode 100644
index 000000000000..db327d661980
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/EdgeExtraction-touzi.html
@@ -0,0 +1,5 @@
+
+
+
EdgeExtraction
Brief Description
Computes edge features on every pixel of the input image selected channel
Tags
Edge,Feature Extraction
Long Description
This application computes edge features on a mono band image
Parameters
[param] -in <string> The input image to compute the features on.. Mandatory: True. Default Value: ""
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/EdgeExtraction.html b/python/plugins/processing/otb/description/doc/EdgeExtraction.html
new file mode 100644
index 000000000000..db327d661980
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/EdgeExtraction.html
@@ -0,0 +1,5 @@
+
+
+
EdgeExtraction
Brief Description
Computes edge features on every pixel of the input image selected channel
Tags
Edge,Feature Extraction
Long Description
This application computes edge features on a mono band image
Parameters
[param] -in <string> The input image to compute the features on.. Mandatory: True. Default Value: ""
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ExtractROI-fit.html b/python/plugins/processing/otb/description/doc/ExtractROI-fit.html
new file mode 100644
index 000000000000..d41e6ff33c1d
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/ExtractROI-fit.html
@@ -0,0 +1,5 @@
+
+
+
ExtractROI
Brief Description
Extract a ROI defined by the user.
Tags
Image Manipulation
Long Description
This application extracts a Region Of Interest with user defined size, or reference image.
[param] -mode.fit.ref <string> Reference image to define the ROI. Mandatory: True. Default Value: ""
[param] -mode.fit.elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -mode.fit.elev.dem <string> This parameter allows to select a directory containing Digital Elevation Model tiles. Mandatory: False. Default Value: ""
[param] -mode.fit.elev.geoid <string> Use a geoid grid to get the height above the ellipsoid in case there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles. A version of the geoid can be found on the OTB website (http://hg.orfeo-toolbox.org/OTB-Data/raw-file/404aa6e4b3e0/Input/DEM/egm96.grd).. Mandatory: False. Default Value: ""
[param] -mode.fit.elev.default <float> This parameter allows to set the default height above ellipsoid when there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles, and no geoid file has been set. This is also used by some application as an average elevation value.. Mandatory: True. Default Value: "0"
[choice] -cl Channels to write in the output image. . Mandatory: True. Default Value: ""
Limitations
None
Authors
OTB-Team
See Also
Example of use
in: VegetationIndex.hd
startx: 40
starty: 250
sizex: 150
sizey: 150
out: ExtractROI.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ExtractROI-standard.html b/python/plugins/processing/otb/description/doc/ExtractROI-standard.html
new file mode 100644
index 000000000000..d41e6ff33c1d
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/ExtractROI-standard.html
@@ -0,0 +1,5 @@
+
+
+
ExtractROI
Brief Description
Extract a ROI defined by the user.
Tags
Image Manipulation
Long Description
This application extracts a Region Of Interest with user defined size, or reference image.
[param] -mode.fit.ref <string> Reference image to define the ROI. Mandatory: True. Default Value: ""
[param] -mode.fit.elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -mode.fit.elev.dem <string> This parameter allows to select a directory containing Digital Elevation Model tiles. Mandatory: False. Default Value: ""
[param] -mode.fit.elev.geoid <string> Use a geoid grid to get the height above the ellipsoid in case there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles. A version of the geoid can be found on the OTB website (http://hg.orfeo-toolbox.org/OTB-Data/raw-file/404aa6e4b3e0/Input/DEM/egm96.grd).. Mandatory: False. Default Value: ""
[param] -mode.fit.elev.default <float> This parameter allows to set the default height above ellipsoid when there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles, and no geoid file has been set. This is also used by some application as an average elevation value.. Mandatory: True. Default Value: "0"
[choice] -cl Channels to write in the output image. . Mandatory: True. Default Value: ""
Limitations
None
Authors
OTB-Team
See Also
Example of use
in: VegetationIndex.hd
startx: 40
starty: 250
sizex: 150
sizey: 150
out: ExtractROI.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ExtractROI.html b/python/plugins/processing/otb/description/doc/ExtractROI.html
index dfd3a846a8da..d41e6ff33c1d 100644
--- a/python/plugins/processing/otb/description/doc/ExtractROI.html
+++ b/python/plugins/processing/otb/description/doc/ExtractROI.html
@@ -1 +1,5 @@
-
Extract ROI Application
Brief Description
Extract a ROI defined by the user.
Tags
Image Manipulation
Long Description
This application extracts a Region Of Interest with user defined size.
[param] -mode.fit.ref <string> Reference image to define the ROI. Mandatory: True. Default Value: ""
[param] -mode.fit.elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -mode.fit.elev.dem <string> This parameter allows to select a directory containing Digital Elevation Model tiles. Mandatory: False. Default Value: ""
[param] -mode.fit.elev.geoid <string> Use a geoid grid to get the height above the ellipsoid in case there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles. A version of the geoid can be found on the OTB website (http://hg.orfeo-toolbox.org/OTB-Data/raw-file/404aa6e4b3e0/Input/DEM/egm96.grd).. Mandatory: False. Default Value: ""
[param] -mode.fit.elev.default <float> This parameter allows to set the default height above ellipsoid when there is no DEM available, no coverage for some points or pixels with no_data in the DEM tiles, and no geoid file has been set. This is also used by some application as an average elevation value.. Mandatory: True. Default Value: "0"
[choice] -cl Channels to write in the output image. . Mandatory: True. Default Value: ""
Limitations
None
Authors
OTB-Team
See Also
Example of use
in: VegetationIndex.hd
startx: 40
starty: 250
sizex: 150
sizey: 150
out: ExtractROI.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/FineRegistration.html b/python/plugins/processing/otb/description/doc/FineRegistration.html
index 211f92dc47f9..253696c8b3f6 100644
--- a/python/plugins/processing/otb/description/doc/FineRegistration.html
+++ b/python/plugins/processing/otb/description/doc/FineRegistration.html
@@ -1 +1,5 @@
-
Fine Registration Application
Brief Description
Estimate disparity map between two images.
Tags
Stereo
Long Description
Estimate disparity map between two images. Output image contain x offset, y offset and metric value.
Parameters
[param] Reference Image (-ref): The reference image.
[param] Secondary Image (-sec): The secondary image.
[param] Output Image (-out): The output image.
[param] Exploration Radius X (-erx): The exploration radius along x (in pixels)
[param] Exploration Radius Y (-ery): The exploration radius along y (in pixels)
[param] Metric Radius X (-mrx): Radius along x (in pixels) of the metric computation window
[param] Metric Radius Y (-mry): Radius along y (in pixels) of the metric computation window
[param] Image To Warp (-w): The image to warp after disparity estimation is complete
[param] Output Warped Image (-wo): The output warped image
[param] Coarse Offset X (-cox): Coarse offset along x (in physical space) between the two images
[param] Coarse Offset Y (-coy): Coarse offset along y (in physical space) between the two images
[param] Sub-Sampling Rate X (-ssrx): Generates a result at a coarser resolution with a given sub-sampling rate along X
[param] Sub-Sampling Rate Y (-ssry): Generates a result at a coarser resolution with a given sub-sampling rate along Y
[param] Reference Gaussian Smoothing X (-rgsx): Performs a gaussian smoothing of the reference image. Parameter is gaussian sigma (in pixels) in X direction.
[param] Reference Gaussian Smoothing Y (-rgsy): Performs a gaussian smoothing of the reference image. Parameter is gaussian sigma (in pixels) in Y direction.
[param] Secondary Gaussian Smoothing X (-sgsx): Performs a gaussian smoothing of the secondary image. Parameter is gaussian sigma (in pixels) in X direction.
[param] Secondary Gaussian Smoothing Y (-sgsy): Performs a gaussian smoothing of the secondary image. Parameter is gaussian sigma (in pixels) in Y direction.
[param] Metric (-m): Choose the metric used for block matching. Available metrics are cross-correlation (CC), cross-correlation with subtracted mean (CCSM), mean-square difference (MSD), mean reciprocal square difference (MRSD) and mutual information (MI). Default is cross-correlation
[param] SubPixelAccuracy (-spa): Metric extrema location will be refined up to the given accuracy. Default is 0.01
[param] Validity Mask Lower Threshold (-vmlt): Lower threshold to obtain a validity mask.
[param] Validity Mask Upper Than (-vmut): Upper threshold to obtain a validity mask.
[param] -cox <float> Coarse offset along x (in physical space) between the two images. Mandatory: False. Default Value: "0"
[param] -coy <float> Coarse offset along y (in physical space) between the two images. Mandatory: False. Default Value: "0"
[param] -ssrx <float> Generates a result at a coarser resolution with a given sub-sampling rate along X. Mandatory: False. Default Value: "1"
[param] -ssry <float> Generates a result at a coarser resolution with a given sub-sampling rate along Y. Mandatory: False. Default Value: "1"
[param] -rgsx <float> Performs a gaussian smoothing of the reference image. Parameter is gaussian sigma (in pixels) in X direction.. Mandatory: False. Default Value: "0.0"
[param] -rgsy <float> Performs a gaussian smoothing of the reference image. Parameter is gaussian sigma (in pixels) in Y direction.. Mandatory: False. Default Value: "0.0"
[param] -sgsx <float> Performs a gaussian smoothing of the secondary image. Parameter is gaussian sigma (in pixels) in X direction.. Mandatory: False. Default Value: "0.0"
[param] -sgsy <float> Performs a gaussian smoothing of the secondary image. Parameter is gaussian sigma (in pixels) in Y direction.. Mandatory: False. Default Value: "0.0"
[param] -m <string> Choose the metric used for block matching. Available metrics are cross-correlation (CC), cross-correlation with subtracted mean (CCSM), mean-square difference (MSD), mean reciprocal square difference (MRSD) and mutual information (MI). Default is cross-correlation. Mandatory: False. Default Value: ""
[param] -spa <float> Metric extrema location will be refined up to the given accuracy. Default is 0.01. Mandatory: False. Default Value: "0.01"
[param] -vmlt <float> Lower threshold to obtain a validity mask.. Mandatory: False. Default Value: "0.0"
[param] -vmut <float> Upper threshold to obtain a validity mask.. Mandatory: False. Default Value: "0.0"
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
Limitations
None
Authors
OTB-Team
See Also
Example of use
ref: StereoFixed.png
sec: StereoMoving.png
out: FineRegistration.tif
erx: 2
ery: 2
mrx: 3
mry: 3
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/FusionOfClassifications-dempstershafer.html b/python/plugins/processing/otb/description/doc/FusionOfClassifications-dempstershafer.html
new file mode 100644
index 000000000000..5f91e9e9ef87
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/FusionOfClassifications-dempstershafer.html
@@ -0,0 +1,9 @@
+
+
+
FusionOfClassifications
Brief Description
Fuses several classifications maps of the same image on the basis of class labels.
Tags
Learning,Image Analysis
Long Description
This application allows to fuse several classification maps and produces a single more robust classification map. Fusion is done either by mean of Majority Voting, or with the Dempster Shafer combination method on class labels.
+ -MAJORITY VOTING: for each pixel, the class with the highest number of votes is selected.
+ -DEMPSTER SHAFER: for each pixel, the class label for which the Belief Function is maximal is selected. This Belief Function is calculated by mean of the Dempster Shafer combination of Masses of Belief, and indicates the belief that each input classification map presents for each label value. Moreover, the Masses of Belief are based on the input confusion matrices of each classification map, either by using the PRECISION or RECALL rates, or the OVERALL ACCURACY, or the KAPPA coefficient. Thus, each input classification map needs to be associated with its corresponding input confusion matrix file for the Dempster Shafer fusion.
+-Input pixels with the NODATA label are not handled in the fusion of classification maps. Moreover, pixels for which all the input classifiers are set to NODATA keep this value in the output fused image.
+-In case of number of votes equality, the UNDECIDED label is attributed to the pixel.
Parameters
[param] -il <string> List of input classification maps to fuse. Labels in each classification image must represent the same class.. Mandatory: True. Default Value: "0"
[param] -nodatalabel <int32> Label for the NoData class. Such input pixels keep their NoData label in the output image and are not handled in the fusion process. By default, 'nodatalabel = 0'.. Mandatory: True. Default Value: "0"
[param] -undecidedlabel <int32> Label for the Undecided class. Pixels with more than 1 fused class are marked as Undecided. Please note that the Undecided value must be different from existing labels in the input classifications. By default, 'undecidedlabel = 0'.. Mandatory: True. Default Value: "0"
[param] -out <string> The output classification image resulting from the fusion of the input classification images.. Mandatory: True. Default Value: ""
[choice] -method Selection of the fusion method and its parameters. majorityvoting,dempstershafer. Mandatory: True. Default Value: "majorityvoting"
[group] -majorityvoting
[group] -dempstershafer
[param] -method.dempstershafer.cmfl <string> A list of confusion matrix files (*.CSV format) to define the masses of belief and the class labels. Each file should be formatted the following way: the first line, beginning with a '#' symbol, should be a list of the class labels present in the corresponding input classification image, organized in the same order as the confusion matrix rows/columns.. Mandatory: True. Default Value: "0"
[param] -method.dempstershafer.mob <string> Type of confusion matrix measurement used to compute the masses of belief of each classifier.. Mandatory: True. Default Value: "precision"
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/FusionOfClassifications-majorityvoting.html b/python/plugins/processing/otb/description/doc/FusionOfClassifications-majorityvoting.html
new file mode 100644
index 000000000000..5f91e9e9ef87
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/FusionOfClassifications-majorityvoting.html
@@ -0,0 +1,9 @@
+
+
+
FusionOfClassifications
Brief Description
Fuses several classifications maps of the same image on the basis of class labels.
Tags
Learning,Image Analysis
Long Description
This application allows to fuse several classification maps and produces a single more robust classification map. Fusion is done either by mean of Majority Voting, or with the Dempster Shafer combination method on class labels.
+ -MAJORITY VOTING: for each pixel, the class with the highest number of votes is selected.
+ -DEMPSTER SHAFER: for each pixel, the class label for which the Belief Function is maximal is selected. This Belief Function is calculated by mean of the Dempster Shafer combination of Masses of Belief, and indicates the belief that each input classification map presents for each label value. Moreover, the Masses of Belief are based on the input confusion matrices of each classification map, either by using the PRECISION or RECALL rates, or the OVERALL ACCURACY, or the KAPPA coefficient. Thus, each input classification map needs to be associated with its corresponding input confusion matrix file for the Dempster Shafer fusion.
+-Input pixels with the NODATA label are not handled in the fusion of classification maps. Moreover, pixels for which all the input classifiers are set to NODATA keep this value in the output fused image.
+-In case of number of votes equality, the UNDECIDED label is attributed to the pixel.
Parameters
[param] -il <string> List of input classification maps to fuse. Labels in each classification image must represent the same class.. Mandatory: True. Default Value: "0"
[param] -nodatalabel <int32> Label for the NoData class. Such input pixels keep their NoData label in the output image and are not handled in the fusion process. By default, 'nodatalabel = 0'.. Mandatory: True. Default Value: "0"
[param] -undecidedlabel <int32> Label for the Undecided class. Pixels with more than 1 fused class are marked as Undecided. Please note that the Undecided value must be different from existing labels in the input classifications. By default, 'undecidedlabel = 0'.. Mandatory: True. Default Value: "0"
[param] -out <string> The output classification image resulting from the fusion of the input classification images.. Mandatory: True. Default Value: ""
[choice] -method Selection of the fusion method and its parameters. majorityvoting,dempstershafer. Mandatory: True. Default Value: "majorityvoting"
[group] -majorityvoting
[group] -dempstershafer
[param] -method.dempstershafer.cmfl <string> A list of confusion matrix files (*.CSV format) to define the masses of belief and the class labels. Each file should be formatted the following way: the first line, beginning with a '#' symbol, should be a list of the class labels present in the corresponding input classification image, organized in the same order as the confusion matrix rows/columns.. Mandatory: True. Default Value: "0"
[param] -method.dempstershafer.mob <string> Type of confusion matrix measurement used to compute the masses of belief of each classifier.. Mandatory: True. Default Value: "precision"
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/FusionOfClassifications.html b/python/plugins/processing/otb/description/doc/FusionOfClassifications.html
new file mode 100644
index 000000000000..5f91e9e9ef87
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/FusionOfClassifications.html
@@ -0,0 +1,9 @@
+
+
+
FusionOfClassifications
Brief Description
Fuses several classifications maps of the same image on the basis of class labels.
Tags
Learning,Image Analysis
Long Description
This application allows to fuse several classification maps and produces a single more robust classification map. Fusion is done either by mean of Majority Voting, or with the Dempster Shafer combination method on class labels.
+ -MAJORITY VOTING: for each pixel, the class with the highest number of votes is selected.
+ -DEMPSTER SHAFER: for each pixel, the class label for which the Belief Function is maximal is selected. This Belief Function is calculated by mean of the Dempster Shafer combination of Masses of Belief, and indicates the belief that each input classification map presents for each label value. Moreover, the Masses of Belief are based on the input confusion matrices of each classification map, either by using the PRECISION or RECALL rates, or the OVERALL ACCURACY, or the KAPPA coefficient. Thus, each input classification map needs to be associated with its corresponding input confusion matrix file for the Dempster Shafer fusion.
+-Input pixels with the NODATA label are not handled in the fusion of classification maps. Moreover, pixels for which all the input classifiers are set to NODATA keep this value in the output fused image.
+-In case of number of votes equality, the UNDECIDED label is attributed to the pixel.
Parameters
[param] -il <string> List of input classification maps to fuse. Labels in each classification image must represent the same class.. Mandatory: True. Default Value: "0"
[param] -nodatalabel <int32> Label for the NoData class. Such input pixels keep their NoData label in the output image and are not handled in the fusion process. By default, 'nodatalabel = 0'.. Mandatory: True. Default Value: "0"
[param] -undecidedlabel <int32> Label for the Undecided class. Pixels with more than 1 fused class are marked as Undecided. Please note that the Undecided value must be different from existing labels in the input classifications. By default, 'undecidedlabel = 0'.. Mandatory: True. Default Value: "0"
[param] -out <string> The output classification image resulting from the fusion of the input classification images.. Mandatory: True. Default Value: ""
[choice] -method Selection of the fusion method and its parameters. majorityvoting,dempstershafer. Mandatory: True. Default Value: "majorityvoting"
[group] -majorityvoting
[group] -dempstershafer
[param] -method.dempstershafer.cmfl <string> A list of confusion matrix files (*.CSV format) to define the masses of belief and the class labels. Each file should be formatted the following way: the first line, beginning with a '#' symbol, should be a list of the class labels present in the corresponding input classification image, organized in the same order as the confusion matrix rows/columns.. Mandatory: True. Default Value: "0"
[param] -method.dempstershafer.mob <string> Type of confusion matrix measurement used to compute the masses of belief of each classifier.. Mandatory: True. Default Value: "precision"
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/GeneratePlyFile.html b/python/plugins/processing/otb/description/doc/GeneratePlyFile.html
new file mode 100644
index 000000000000..0af728358bcc
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/GeneratePlyFile.html
@@ -0,0 +1,5 @@
+
+
+
GeneratePlyFile
Brief Description
Generate a 3D Ply file from a DEM and a color image.
Tags
Geometry
Long Description
Generate a 3D Ply file from a DEM and a color image.
[choice] -map Parameters of the output map projection to be used. utm,lambert2,lambert93,wgs,epsg. Mandatory: True. Default Value: "utm"
[group] -utm
[param] -map.utm.zone <int32> The zone number ranges from 1 to 60 and allows to define the transverse mercator projection (along with the hemisphere). Mandatory: True. Default Value: "31"
[param] -map.utm.northhem <boolean> The transverse mercator projections are defined by their zone number as well as the hemisphere. Activate this parameter if your image is in the northern hemisphere.. Mandatory: False. Default Value: "True"
[group] -lambert2
[group] -lambert93
[group] -wgs
[group] -epsg
[param] -map.epsg.code <int32> See www.spatialreference.org to find which EPSG code is associated to your projection. Mandatory: True. Default Value: "4326"
Limitations
Authors
OTB-Team
See Also
Example of use
indem: image_dem.tif
out: out.ply
incolor: image_color.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/GenerateRPCSensorModel.html b/python/plugins/processing/otb/description/doc/GenerateRPCSensorModel.html
new file mode 100644
index 000000000000..9ee7268ebd68
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/GenerateRPCSensorModel.html
@@ -0,0 +1,5 @@
+
+
+
GenerateRPCSensorModel
Brief Description
Generate a RPC sensor model from a list of Ground Control Points.
Tags
Geometry
Long Description
This application generates a RPC sensor model from a list of Ground Control Points. At least 20 points are required for estimation without elevation support, and 40 points for estimation with elevation support. Elevation support will be automatically deactivated if an insufficient amount of points is provided. The application can optionally output a file containing accuracy statistics for each point, and a vector file containing segments representing points residues. The map projection parameter allows to define a map projection in which the accuracy is evaluated.
[param] -inpoints <string> Input file containing tie points. Points are stored in following format: row col lon lat. Line beginning with # are ignored.. Mandatory: True. Default Value: ""
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[choice] -map Parameters of the output map projection to be used. utm,lambert2,lambert93,wgs,epsg. Mandatory: True. Default Value: "utm"
[group] -utm
[param] -map.utm.zone <int32> The zone number ranges from 1 to 60 and allows to define the transverse mercator projection (along with the hemisphere). Mandatory: True. Default Value: "31"
[param] -map.utm.northhem <boolean> The transverse mercator projections are defined by their zone number as well as the hemisphere. Activate this parameter if your image is in the northern hemisphere.. Mandatory: False. Default Value: "True"
[group] -lambert2
[group] -lambert93
[group] -wgs
[group] -epsg
[param] -map.epsg.code <int32> See www.spatialreference.org to find which EPSG code is associated to your projection. Mandatory: True. Default Value: "4326"
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/GrayScaleMorphologicalOperation-closing.html b/python/plugins/processing/otb/description/doc/GrayScaleMorphologicalOperation-closing.html
new file mode 100644
index 000000000000..18563e259c30
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/GrayScaleMorphologicalOperation-closing.html
@@ -0,0 +1,5 @@
+
+
+
GrayScaleMorphologicalOperation
Brief Description
Performs morphological operations on a grayscale input image
Tags
MorphologicalOperations,Feature Extraction
Long Description
This application performs grayscale morphological operations on a mono band image
Parameters
[param] -in <string> The input image to be filtered.. Mandatory: True. Default Value: ""
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -structype Choice of the structuring element type ball,cross. Mandatory: True. Default Value: "ball"
[group] -ball
[param] -structype.ball.xradius <int32> The Structuring Element X Radius. Mandatory: True. Default Value: "5"
[param] -structype.ball.yradius <int32> The Structuring Element Y Radius. Mandatory: True. Default Value: "5"
[group] -cross
[choice] -filter Choice of the morphological operation dilate,erode,opening,closing. Mandatory: True. Default Value: "dilate"
[group] -dilate
[group] -erode
[group] -opening
[group] -closing
Limitations
None
Authors
OTB-Team
See Also
itkGrayscaleDilateImageFilter, itkGrayscaleErodeImageFilter, itkGrayscaleMorphologicalOpeningImageFilter and itkGrayscaleMorphologicalClosingImageFilter classes
Example of use
in: qb_RoadExtract.tif
out: opened.tif
channel: 1
structype.ball.xradius: 5
structype.ball.yradius: 5
filter: erode
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/GrayScaleMorphologicalOperation-dilate.html b/python/plugins/processing/otb/description/doc/GrayScaleMorphologicalOperation-dilate.html
new file mode 100644
index 000000000000..18563e259c30
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/GrayScaleMorphologicalOperation-dilate.html
@@ -0,0 +1,5 @@
+
+
+
GrayScaleMorphologicalOperation
Brief Description
Performs morphological operations on a grayscale input image
Tags
MorphologicalOperations,Feature Extraction
Long Description
This application performs grayscale morphological operations on a mono band image
Parameters
[param] -in <string> The input image to be filtered.. Mandatory: True. Default Value: ""
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -structype Choice of the structuring element type ball,cross. Mandatory: True. Default Value: "ball"
[group] -ball
[param] -structype.ball.xradius <int32> The Structuring Element X Radius. Mandatory: True. Default Value: "5"
[param] -structype.ball.yradius <int32> The Structuring Element Y Radius. Mandatory: True. Default Value: "5"
[group] -cross
[choice] -filter Choice of the morphological operation dilate,erode,opening,closing. Mandatory: True. Default Value: "dilate"
[group] -dilate
[group] -erode
[group] -opening
[group] -closing
Limitations
None
Authors
OTB-Team
See Also
itkGrayscaleDilateImageFilter, itkGrayscaleErodeImageFilter, itkGrayscaleMorphologicalOpeningImageFilter and itkGrayscaleMorphologicalClosingImageFilter classes
Example of use
in: qb_RoadExtract.tif
out: opened.tif
channel: 1
structype.ball.xradius: 5
structype.ball.yradius: 5
filter: erode
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/GrayScaleMorphologicalOperation-erode.html b/python/plugins/processing/otb/description/doc/GrayScaleMorphologicalOperation-erode.html
new file mode 100644
index 000000000000..18563e259c30
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/GrayScaleMorphologicalOperation-erode.html
@@ -0,0 +1,5 @@
+
+
+
GrayScaleMorphologicalOperation
Brief Description
Performs morphological operations on a grayscale input image
Tags
MorphologicalOperations,Feature Extraction
Long Description
This application performs grayscale morphological operations on a mono band image
Parameters
[param] -in <string> The input image to be filtered.. Mandatory: True. Default Value: ""
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -structype Choice of the structuring element type ball,cross. Mandatory: True. Default Value: "ball"
[group] -ball
[param] -structype.ball.xradius <int32> The Structuring Element X Radius. Mandatory: True. Default Value: "5"
[param] -structype.ball.yradius <int32> The Structuring Element Y Radius. Mandatory: True. Default Value: "5"
[group] -cross
[choice] -filter Choice of the morphological operation dilate,erode,opening,closing. Mandatory: True. Default Value: "dilate"
[group] -dilate
[group] -erode
[group] -opening
[group] -closing
Limitations
None
Authors
OTB-Team
See Also
itkGrayscaleDilateImageFilter, itkGrayscaleErodeImageFilter, itkGrayscaleMorphologicalOpeningImageFilter and itkGrayscaleMorphologicalClosingImageFilter classes
Example of use
in: qb_RoadExtract.tif
out: opened.tif
channel: 1
structype.ball.xradius: 5
structype.ball.yradius: 5
filter: erode
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/GrayScaleMorphologicalOperation-opening.html b/python/plugins/processing/otb/description/doc/GrayScaleMorphologicalOperation-opening.html
new file mode 100644
index 000000000000..18563e259c30
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/GrayScaleMorphologicalOperation-opening.html
@@ -0,0 +1,5 @@
+
+
+
GrayScaleMorphologicalOperation
Brief Description
Performs morphological operations on a grayscale input image
Tags
MorphologicalOperations,Feature Extraction
Long Description
This application performs grayscale morphological operations on a mono band image
Parameters
[param] -in <string> The input image to be filtered.. Mandatory: True. Default Value: ""
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -structype Choice of the structuring element type ball,cross. Mandatory: True. Default Value: "ball"
[group] -ball
[param] -structype.ball.xradius <int32> The Structuring Element X Radius. Mandatory: True. Default Value: "5"
[param] -structype.ball.yradius <int32> The Structuring Element Y Radius. Mandatory: True. Default Value: "5"
[group] -cross
[choice] -filter Choice of the morphological operation dilate,erode,opening,closing. Mandatory: True. Default Value: "dilate"
[group] -dilate
[group] -erode
[group] -opening
[group] -closing
Limitations
None
Authors
OTB-Team
See Also
itkGrayscaleDilateImageFilter, itkGrayscaleErodeImageFilter, itkGrayscaleMorphologicalOpeningImageFilter and itkGrayscaleMorphologicalClosingImageFilter classes
Example of use
in: qb_RoadExtract.tif
out: opened.tif
channel: 1
structype.ball.xradius: 5
structype.ball.yradius: 5
filter: erode
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/GrayScaleMorphologicalOperation.html b/python/plugins/processing/otb/description/doc/GrayScaleMorphologicalOperation.html
new file mode 100644
index 000000000000..18563e259c30
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/GrayScaleMorphologicalOperation.html
@@ -0,0 +1,5 @@
+
+
+
GrayScaleMorphologicalOperation
Brief Description
Performs morphological operations on a grayscale input image
Tags
MorphologicalOperations,Feature Extraction
Long Description
This application performs grayscale morphological operations on a mono band image
Parameters
[param] -in <string> The input image to be filtered.. Mandatory: True. Default Value: ""
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -structype Choice of the structuring element type ball,cross. Mandatory: True. Default Value: "ball"
[group] -ball
[param] -structype.ball.xradius <int32> The Structuring Element X Radius. Mandatory: True. Default Value: "5"
[param] -structype.ball.yradius <int32> The Structuring Element Y Radius. Mandatory: True. Default Value: "5"
[group] -cross
[choice] -filter Choice of the morphological operation dilate,erode,opening,closing. Mandatory: True. Default Value: "dilate"
[group] -dilate
[group] -erode
[group] -opening
[group] -closing
Limitations
None
Authors
OTB-Team
See Also
itkGrayscaleDilateImageFilter, itkGrayscaleErodeImageFilter, itkGrayscaleMorphologicalOpeningImageFilter and itkGrayscaleMorphologicalClosingImageFilter classes
Example of use
in: qb_RoadExtract.tif
out: opened.tif
channel: 1
structype.ball.xradius: 5
structype.ball.yradius: 5
filter: erode
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/GridBasedImageResampling.html b/python/plugins/processing/otb/description/doc/GridBasedImageResampling.html
new file mode 100644
index 000000000000..cc6f32cee3e5
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/GridBasedImageResampling.html
@@ -0,0 +1,5 @@
+
+
+
GridBasedImageResampling
Brief Description
Resamples an image according to a resampling grid
Tags
Geometry
Long Description
This application allows to perform image resampling from an input resampling grid.
Parameters
[param] -io <string> This group of parameters allows to set the input and output images.. Mandatory: True. Default Value: "0"
[param] -out <string> Parameters of the output image. Mandatory: True. Default Value: "0"
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -interpolator This group of parameters allows to define how the input image will be interpolated during resampling. nn,linear,bco. Mandatory: True. Default Value: "bco"
[group] -nn
[group] -linear
[group] -bco
[param] -interpolator.bco.radius <int32> This parameter allows to control the size of the bicubic interpolation filter. If the target pixel size is higher than the input pixel size, increasing this parameter will reduce aliasing artefacts.. Mandatory: True. Default Value: "2"
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/HaralickTextureExtraction.html b/python/plugins/processing/otb/description/doc/HaralickTextureExtraction.html
new file mode 100644
index 000000000000..a1d7246471ec
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/HaralickTextureExtraction.html
@@ -0,0 +1,5 @@
+
+
+
HaralickTextureExtraction
Brief Description
Computes textures on every pixel of the input image selected channel
Tags
Textures,Feature Extraction
Long Description
This application computes Haralick, advanced and higher order textures on a mono band image
Parameters
[param] -in <string> The input image to compute the features on.. Mandatory: True. Default Value: ""
[choice] -texture Choice of The Texture Set simple,advanced,higher. Mandatory: True. Default Value: "simple"
[group] -simple
[group] -advanced
[group] -higher
Limitations
None
Authors
OTB-Team
See Also
otbScalarImageToTexturesFilter, otbScalarImageToAdvancedTexturesFilter and otbScalarImageToHigherOrderTexturesFilter classes
Example of use
in: qb_RoadExtract.tif
channel: 2
parameters.xrad: 3
parameters.yrad: 3
texture: simple
out: HaralickTextures.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/HomologousPointsExtraction.html b/python/plugins/processing/otb/description/doc/HomologousPointsExtraction.html
new file mode 100644
index 000000000000..715e7865806a
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/HomologousPointsExtraction.html
@@ -0,0 +1,5 @@
+
+
+
HomologousPointsExtraction
Brief Description
Allows to compute homologous points between images using keypoints
Tags
Feature Extraction
Long Description
This application allows to compute homologous points between images using keypoints. SIFT or SURF keypoints can be used and the band on which keypoints are computed can be set independently for both images. The application offers two modes : the first is the full mode where keypoints are extracted from the full extent of both images (please note that in this mode large image files are not supported). The second mode, called geobins, allows to set-up spatial binning so as to get fewer points spread across the entire image. In this mode, the corresponding spatial bin in the second image is estimated using geographical transform or sensor modelling, and is padded according to the user defined precision. Last, in both modes the application can filter matches whose colocalisation in first image exceeds this precision. The elevation parameters are to deal more precisely with sensor modelling in case of sensor geometry data. The outvector option allows to create a vector file with segments corresponding to the localisation error between the matches. It can be useful to assess the precision of a registration for instance.
[param] -band2 <int32> Index of the band from input image 1 to use for keypoints extraction. Mandatory: True. Default Value: "1"
[param] -threshold <float> The distance threshold for matching.. Mandatory: True. Default Value: "0.6"
[param] -backmatching <boolean> If set to true, matches should be consistent in both ways.. Mandatory: False. Default Value: "True"
[param] -precision <float> Estimated precision of the colocalisation function in pixels. Mandatory: True. Default Value: "0"
[param] -mfilter <boolean> If enabled, this option allows to filter matches according to colocalisation from sensor or geographical information, using the given tolerance expressed in pixels. Mandatory: False. Default Value: "True"
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -out <string> File containing the list of tie points. Mandatory: True. Default Value: ""
[param] -mode.geobins.binsize <int32> Radius of the spatial bin in pixels. Mandatory: True. Default Value: "256"
[param] -mode.geobins.binstep <int32> Steps between bins in pixels. Mandatory: True. Default Value: "256"
Limitations
Full mode does not handle large images.
Authors
OTB-Team
See Also
RefineSensorModel
Example of use
in1: sensor_stereo_left.tif
in2: sensor_stereo_right.tif
mode: full
out: homologous.txt
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/HooverCompareSegmentation.html b/python/plugins/processing/otb/description/doc/HooverCompareSegmentation.html
new file mode 100644
index 000000000000..c77e2f782b5f
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/HooverCompareSegmentation.html
@@ -0,0 +1,7 @@
+
+
+
HooverCompareSegmentation
Brief Description
Compare two segmentations with Hoover metrics
Tags
Segmentation
Long Description
This application compares a machine segmentation (MS) with a partial ground truth segmentation (GT). The Hoover metrics are used to estimate scores for correct detection, over-segmentation, under-segmentation and missed detection.
+ The application can output the overall Hoover scores along with colored images of the MS and GT segmentation showing the state of each region (correct detection, over-segmentation, under-segmentation, missed)
+ The Hoover metrics are described in : Hoover et al., "An experimental comparison of range image segmentation algorithms", IEEE PAMI vol. 18, no. 7, July 1996.
Parameters
[param] -ingt <string> A partial ground truth segmentation image.. Mandatory: True. Default Value: ""
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/HyperspectralUnmixing.html b/python/plugins/processing/otb/description/doc/HyperspectralUnmixing.html
index 1a18144439a8..9488cfe65887 100644
--- a/python/plugins/processing/otb/description/doc/HyperspectralUnmixing.html
+++ b/python/plugins/processing/otb/description/doc/HyperspectralUnmixing.html
@@ -1,4 +1,8 @@
-
Hyperspectral data unmixing
Brief Description
Estimate abundance maps from an hyperspectral image and a set of endmembers.
Tags
Hyperspectral
Long Description
The application applies a linear unmixing algorithm to an hyperspectral data cube. This method supposes that the mixture between materials in the scene is macroscopic and simulate a linear mixing model of spectra.
-The Linear Mixing Model (LMM) acknowledges that reflectance spectrum associated with each pixel is a linear combination of pure materials in the recovery area, commonly known as endmembers.Endmembers can be estimated using the VertexComponentAnalysis application.
-The application allows to estimate the abundance maps with several algorithms : Unconstrained Least Square (ucls), Fully Constrained Least Square (fcls),Image Space Reconstruction Algorithm (isra) and Non-negative constrained Least Square (ncls) and Minimum Dispersion Constrained Non Negative Matrix Factorization (MDMDNMF).
-
Parameters
[param] Input Image Filename (-in): The hyperspectral data cube to unmix
[param] Output Image (-out): The output abundance map
[param] Input endmembers (-ie): The endmembers (estimated pure pixels) to use for unmixing. Must be stored as a multispectral image, where each pixel is interpreted as an endmember
[choice] Unmixing algorithm (-ua): The algorithm to use for unmixing
[group] UCLS: Unconstrained Least Square
[group] FCLS: Fully constrained Least Square
[group] NCLS: Non-negative constrained Least Square
[group] ISRA: Image Space Reconstruction Algorithm
[group] MDMDNMF: Minimum Dispersion Constrained Non Negative Matrix Factorization
Estimate abundance maps from an hyperspectral image and a set of endmembers.
Tags
Hyperspectral
Long Description
The application applies a linear unmixing algorithm to an hyperspectral data cube. This method supposes that the mixture between materials in the scene is macroscopic and simulates a linear mixing model of spectra.
+The Linear Mixing Model (LMM) acknowledges that reflectance spectrum associated with each pixel is a linear combination of pure materials in the recovery area, commonly known as endmembers. Endmembers can be estimated using the VertexComponentAnalysis application.
+The application allows to estimate the abundance maps with several algorithms : Unconstrained Least Square (ucls), Fully Constrained Least Square (fcls), Image Space Reconstruction Algorithm (isra) and Non-negative constrained Least Square (ncls) and Minimum Dispersion Constrained Non Negative Matrix Factorization (MDMDNMF).
+
Parameters
[param] -in <string> The hyperspectral data cube to unmix. Mandatory: True. Default Value: ""
[param] -ie <string> The endmembers (estimated pure pixels) to use for unmixing. Must be stored as a multispectral image, where each pixel is interpreted as an endmember. Mandatory: True. Default Value: ""
[choice] -ua The algorithm to use for unmixing ucls,ncls,isra,mdmdnmf. Mandatory: False. Default Value: "ucls"
[group] -ucls
[group] -ncls
[group] -isra
[group] -mdmdnmf
Limitations
None
Authors
OTB-Team
See Also
VertexComponentAnalysis
Example of use
in: cupriteSubHsi.tif
ie: cupriteEndmembers.tif
out: HyperspectralUnmixing.tif double
ua: ucls
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ImageClassifier.html b/python/plugins/processing/otb/description/doc/ImageClassifier.html
new file mode 100644
index 000000000000..52d60c4e9026
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/ImageClassifier.html
@@ -0,0 +1,5 @@
+
+
+
ImageClassifier
Brief Description
Performs a classification of the input image according to a model file.
Tags
Learning
Long Description
This application performs an image classification based on a model file produced by the TrainImagesClassifier application. Pixels of the output image will contain the class labels decided by the classifier (maximal class label = 65535). The input pixels can be optionally centered and reduced according to the statistics file produced by the ComputeImagesStatistics application. An optional input mask can be provided, in which case only input image pixels whose corresponding mask value is greater than 0 will be classified. The remaining of pixels will be given the label 0 in the output image.
Parameters
[param] -in <string> The input image to classify.. Mandatory: True. Default Value: ""
[param] -mask <string> The mask allows to restrict classification of the input image to the area where mask pixel values are greater than 0.. Mandatory: False. Default Value: ""
[param] -model <string> A model file (produced by TrainImagesClassifier application, maximal class label = 65535).. Mandatory: True. Default Value: ""
[param] -imstat <string> A XML file containing mean and standard deviation to center and reduce samples before classification (produced by ComputeImagesStatistics application).. Mandatory: False. Default Value: ""
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
Limitations
The input image must have the same type, order and number of bands than the images used to produce the statistics file and the SVM model file. If a statistics file was used during training by the TrainImagesClassifier, it is mandatory to use the same statistics file for classification. If an input mask is used, its size must match the input image size.
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ImageEnvelope.html b/python/plugins/processing/otb/description/doc/ImageEnvelope.html
index 7e16d0ca211a..a60cb53ef65a 100644
--- a/python/plugins/processing/otb/description/doc/ImageEnvelope.html
+++ b/python/plugins/processing/otb/description/doc/ImageEnvelope.html
@@ -1 +1,5 @@
-
Image Envelope Application
Brief Description
Extracts an image envelope.
Tags
Geometry
Long Description
Build a vector data containing the polygon of the image envelope.
Parameters
[param] Input Image (-in): Input image.
[param] Output Vector Data (-out): Vector data file containing the envelope
[param] AverageElevation (-ae): If no DEM is used, provide the height value (default is 0 meters)
[param] DEMDirectory (-dem): Use DEM tiles to derive height values (AverageElevation option is ignored in this case)
[param] Projection (-proj): Projection to be used to compute the envelope (default is WGS84)
[param] -out <string> Vector data file containing the envelope. Mandatory: True. Default Value: ""
[param] -sr <int32> Sampling rate for image edges (in pixel). Mandatory: False. Default Value: "0"
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -proj <string> Projection to be used to compute the envelope (default is WGS84). Mandatory: False. Default Value: ""
Limitations
None
Authors
OTB-Team
See Also
Example of use
in: QB_TOULOUSE_MUL_Extract_500_500.tif
out: ImageEnvelope.shp
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/KMeansClassification.html b/python/plugins/processing/otb/description/doc/KMeansClassification.html
index c72198818eca..182c1a17836b 100644
--- a/python/plugins/processing/otb/description/doc/KMeansClassification.html
+++ b/python/plugins/processing/otb/description/doc/KMeansClassification.html
@@ -1 +1,5 @@
-
[param] -rand <int32> Set specific seed. with integer value.. Mandatory: False. Default Value: "0"
Limitations
None
Authors
OTB-Team
See Also
Example of use
in: QB_1_ortho.tif
ts: 1000
nc: 5
maxit: 1000
ct: 0.0001
out: ClassificationFilterOutput.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/KmzExport.html b/python/plugins/processing/otb/description/doc/KmzExport.html
index 04d38658d45f..6f7701b64e05 100644
--- a/python/plugins/processing/otb/description/doc/KmzExport.html
+++ b/python/plugins/processing/otb/description/doc/KmzExport.html
@@ -1 +1,5 @@
-
Image to KMZ Export Application
Brief Description
Export the input image in a KMZ product.
Tags
KMZ, Export
Long Description
This application exports the input image in a kmz product that can be displayed in the Google Earth software. The user can set the size of the product size, a logo and a legend to the product. Furthermore, to obtain a product that fits the relief, a DEM can be used.
This application exports the input image in a kmz product that can be displayed in the Google Earth software. The user can set the size of the product size, a logo and a legend to the product. Furthermore, to obtain a product that fits the relief, a DEM can be used.
[param] -tilesize <int32> Size of the tiles in the kmz product, in number of pixels.. Mandatory: False. Default Value: "0"
[param] -logo <string> Path to the image logo to add to the KMZ product.. Mandatory: False. Default Value: ""
[param] -legend <string> Path to the image legend to add to the KMZ product.. Mandatory: False. Default Value: ""
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
Limitations
None
Authors
OTB-Team
See Also
Conversion
Example of use
in: qb_RoadExtract2.tif
out: otbKmzExport.kmz
logo: otb_big.png
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/LSMSSegmentation.html b/python/plugins/processing/otb/description/doc/LSMSSegmentation.html
new file mode 100644
index 000000000000..03151c9dd5f8
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/LSMSSegmentation.html
@@ -0,0 +1,5 @@
+
+
+
LSMSSegmentation
Brief Description
Second step of the exact Large-Scale Mean-Shift segmentation workflow.
Tags
Segmentation,LSMS
Long Description
This application performs the second step of the exact Large-Scale Mean-Shift segmentation workflow (LSMS). Filtered range image and spatial image should be created with the MeanShiftSmoothing application, with modesearch parameter disabled. If spatial image is not set, the application will only process the range image and spatial radius parameter will not be taken into account. This application will produce a labeled image where neighbor pixels whose range distance is below range radius (and optionally spatial distance below spatial radius) will be grouped together into the same cluster. For large images one can use the nbtilesx and nbtilesy parameters for tile-wise processing, with the guarantees of identical results. Please note that this application will generate a lot of temporary files (as many as the number of tiles), and will therefore require twice the size of the final result in term of disk space. The cleanup option (activated by default) allows to remove all temporary file as soon as they are not needed anymore (if cleanup is activated, tmpdir set and tmpdir does not exists before running the application, it will be removed as well during cleanup). The tmpdir option allows to define a directory where to write the temporary files. Please also note that the output image type should be set to uint32 to ensure that there are enough labels available.
[param] -inpos <string> The spatial image. Spatial input is the displacement map (output of the Adaptive MeanShift Smoothing application).. Mandatory: False. Default Value: ""
[param] -out <string> The output image. The output image is the segmentation of the filtered image. It is recommended to set the pixel type to uint32.. Mandatory: True. Default Value: ""
[param] -ranger <float> Range radius defining the radius (expressed in radiometry unit) in the multi-spectral space.. Mandatory: False. Default Value: "15"
[param] -spatialr <float> Spatial radius of the neighborhood.. Mandatory: False. Default Value: "5"
[param] -minsize <int32> Minimum Region Size. If, after the segmentation, a region is of size lower than this criterion, the region is deleted.. Mandatory: False. Default Value: "0"
[param] -tilesizex <int32> Size of tiles along the X-axis.. Mandatory: True. Default Value: "500"
[param] -tilesizey <int32> Size of tiles along the Y-axis.. Mandatory: True. Default Value: "500"
[param] -tmpdir <string> This application needs to write temporary files for each tile. This parameter allows choosing the path where to write those files. If disabled, the current path will be used.. Mandatory: False. Default Value: ""
[param] -cleanup <boolean> If activated, the application will try to clean all temporary files it created. Mandatory: False. Default Value: "True"
[param] -inxml <string> Load otb application from xml file. Mandatory: False. Default Value: ""
[param] -outxml <string> Save otb application to xml file. Mandatory: False. Default Value: ""
Limitations
This application is part of the Large-Scale Mean-Shift segmentation workflow (LSMS) and may not be suited for any other purpose.
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/LSMSSmallRegionsMerging.html b/python/plugins/processing/otb/description/doc/LSMSSmallRegionsMerging.html
new file mode 100644
index 000000000000..b293794cb450
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/LSMSSmallRegionsMerging.html
@@ -0,0 +1,5 @@
+
+
+
LSMSSmallRegionsMerging
Brief Description
Third (optional) step of the exact Large-Scale Mean-Shift segmentation workflow.
Tags
Segmentation,LSMS
Long Description
This application performs the third step of the exact Large-Scale Mean-Shift segmentation workflow (LSMS). Given a segmentation result (label image) and the original image, it will merge regions whose size in pixels is lower than minsize parameter with the adjacent region with closest radiometry and acceptable size. Small regions will be processed by size: first all regions of area equal to 1 pixel will be merged with adjacent region, then all regions of area equal to 2 pixels, until regions of area minsize. For large images one can use the nbtilesx and nbtilesy parameters for tile-wise processing, with the guarantees of identical results.
[param] -inseg <string> The segmented image input. Segmented image input is the segmentation of the input image.. Mandatory: True. Default Value: ""
[param] -out <string> The output image. The output image is the input image where the minimal regions have been merged.. Mandatory: True. Default Value: ""
[param] -minsize <int32> Minimum Region Size. If, after the segmentation, a region is of size lower than this criterion, the region is merged with the "nearest" region (radiometrically).. Mandatory: False. Default Value: "50"
[param] -tilesizex <int32> Size of tiles along the X-axis.. Mandatory: True. Default Value: "500"
[param] -tilesizey <int32> Size of tiles along the Y-axis.. Mandatory: True. Default Value: "500"
[param] -inxml <string> Load otb application from xml file. Mandatory: False. Default Value: ""
[param] -outxml <string> Save otb application to xml file. Mandatory: False. Default Value: ""
Limitations
This application is part of the Large-Scale Mean-Shift segmentation workflow (LSMS) and may not be suited for any other purpose.
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/LSMSVectorization.html b/python/plugins/processing/otb/description/doc/LSMSVectorization.html
new file mode 100644
index 000000000000..5414a039181e
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/LSMSVectorization.html
@@ -0,0 +1,5 @@
+
+
+
LSMSVectorization
Brief Description
Fourth step of the exact Large-Scale Mean-Shift segmentation workflow.
Tags
Segmentation,LSMS
Long Description
This application performs the fourth step of the exact Large-Scale Mean-Shift segmentation workflow (LSMS). Given a segmentation result (label image), that may have been processed for small regions merging or not, it will convert it to a GIS vector file containing one polygon per segment. Each polygon contains additional fields: mean and variance of each channels from input image (in parameter), segmentation image label, number of pixels in the polygon. For large images one can use the nbtilesx and nbtilesy parameters for tile-wise processing, with the guarantees of identical results.
[param] -inseg <string> The segmented image input. Segmented image input is the segmentation of the input image.. Mandatory: True. Default Value: ""
[param] -out <string> The output GIS vector file, representing the vectorized version of the segmented image where the features of the polygons are the radiometric means and variances.. Mandatory: True. Default Value: ""
[param] -tilesizex <int32> Size of tiles along the X-axis.. Mandatory: True. Default Value: "500"
[param] -tilesizey <int32> Size of tiles along the Y-axis.. Mandatory: True. Default Value: "500"
[param] -inxml <string> Load otb application from xml file. Mandatory: False. Default Value: ""
[param] -outxml <string> Save otb application to xml file. Mandatory: False. Default Value: ""
Limitations
This application is part of the Large-Scale Mean-Shift segmentation workflow (LSMS) and may not be suited for any other purpose.
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/LineSegmentDetection.html b/python/plugins/processing/otb/description/doc/LineSegmentDetection.html
index 0745d677fb2f..4e8939cb1dec 100644
--- a/python/plugins/processing/otb/description/doc/LineSegmentDetection.html
+++ b/python/plugins/processing/otb/description/doc/LineSegmentDetection.html
@@ -1,3 +1,7 @@
-
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -norescale <boolean> By default, the input image amplitude is rescaled between [0,255]. Turn on this parameter to skip rescaling. Mandatory: False. Default Value: "True"
Limitations
None
Authors
OTB-Team
See Also
On Line demonstration of the LSD algorithm is available here: http://www.ipol.im/pub/algo/gjmr_line_segment_detector/
+
Example of use
in: QB_Suburb.png
out: LineSegmentDetection.shp
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/LocalStatisticExtraction.html b/python/plugins/processing/otb/description/doc/LocalStatisticExtraction.html
new file mode 100644
index 000000000000..390f34a9f127
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/LocalStatisticExtraction.html
@@ -0,0 +1,5 @@
+
+
+
LocalStatisticExtraction
Brief Description
Computes local statistical moments on every pixel in the selected channel of the input image
Tags
Statistics,Feature Extraction
Long Description
This application computes the 4 local statistical moments on every pixel in the selected channel of the input image, over a specified neighborhood. The output image is multi band with one statistical moment (feature) per band. Thus, the 4 output features are the Mean, the Variance, the Skewness and the Kurtosis. They are provided in this exact order in the output image.
Parameters
[param] -in <string> The input image to compute the features on.. Mandatory: True. Default Value: ""
This application performs mean shift filtering (multi-threaded).
-
Parameters
-
This section describes in details the parameters available for this application. Table 4.29, page 396
-presents a summary of these parameters and the parameters keys to be used in command-line and
-programming languages. Application key is MeanShiftSmoothing.
-
-
-
-
-
-
-
-
Parameter key
Parameter type
Parameter description
-
in
Input image
Input Image
-
fout
Output image
Filtered output
-
spatialr
Int
Spatial radius
-
ranger
Float
Range radius
-
thres
Float
Mode convergence threshold
-
maxiter
Int
Maximum number of iterations
-
modesearch
Boolean
Mode search.
-
-
-
Table 4.29: Parameters table for Mean Shift filtering.
-
-
-
-
-
-
Input Image: The input image.
-
-
Filtered output: The filtered output image.
-
-
Spatial radius: Spatial radius of the neighborhood.
-
-
Range radius: Range radius defining the radius (expressed in radiometry unit) in the
- multi-spectral space.
-
-
Mode convergence threshold: Algorithm iterative scheme will stop if mean-shift vector is
- below this threshold or if iteration number reached maximum number of iterations.
-
-
Maximum number of iterations: Algorithm iterative scheme will stop if convergence hasn’t
- been reached after the maximum number of iterations.
-
-
Mode search.: If activated pixel iterative convergence is stopped if the path . Be careful, with
- this option, the result will slightly depend on thread number
-
Example
-
To run this example in command-line, use the following:
-
[param] -spatialr <int32> Spatial radius of the neighborhood.. Mandatory: True. Default Value: "5"
[param] -ranger <float> Range radius defining the radius (expressed in radiometry unit) in the multi-spectral space.. Mandatory: True. Default Value: "15"
[param] -thres <float> Algorithm iterative scheme will stop if mean-shift vector is below this threshold or if iteration number reached maximum number of iterations.. Mandatory: False. Default Value: "0.1"
[param] -maxiter <int32> Algorithm iterative scheme will stop if convergence hasn't been reached after the maximum number of iterations.. Mandatory: False. Default Value: "100"
[param] -modesearch <boolean> If activated pixel iterative convergence is stopped if the path . Be careful, with this option, the result will slightly depend on thread number. Mandatory: False. Default Value: "True"
Limitations
With mode search option, the result will slightly depend on thread number.
Authors
OTB-Team
See Also
Example of use
in: maur_rgb.png
fout: MeanShift_FilterOutput.tif
spatialr: 16
ranger: 16
thres: 0.1
maxiter: 100
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/MultiResolutionPyramid.html b/python/plugins/processing/otb/description/doc/MultiResolutionPyramid.html
index 02f4fb384bc1..b704492f009d 100644
--- a/python/plugins/processing/otb/description/doc/MultiResolutionPyramid.html
+++ b/python/plugins/processing/otb/description/doc/MultiResolutionPyramid.html
@@ -1 +1,5 @@
-
This application builds a multi-resolution pyramid of the input image.
Parameters
[param] Input Image (-in):
[param] Output Image (-out): will be used to get the prefix and the extension of the images to write
[param] Available RAM (-ram): Available RAM
[param] Number Of Levels (-level): Number of levels in the pyramid (default is 1).
[param] Subsampling factor (-sfactor):
[param] Subsampling factor (-vfactor):
[param] Use Fast Scheme (-fast): If used, this option allows to speed-up computation by iteratively subsampling previous level of pyramid instead of processing the full input.
This application builds a multi-resolution pyramid of the input image. User can specify the number of levels of the pyramid and the subsampling factor. To speed up the process, you can use the fast scheme option
[param] -out <string> will be used to get the prefix and the extension of the images to write. Mandatory: True. Default Value: ""
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[param] -level <int32> Number of levels in the pyramid (default is 1).. Mandatory: True. Default Value: "1"
[param] -sfactor <int32> Subsampling factor between each level of the pyramid (default is 2).. Mandatory: True. Default Value: "2"
[param] -vfactor <float> Variance factor use in smoothing. It is multiplied by the subsampling factor of each level in the pyramid (default is 0.6).. Mandatory: True. Default Value: "0.6"
[param] -fast <boolean> If used, this option allows to speed-up computation by iteratively subsampling previous level of pyramid instead of processing the full input.. Mandatory: False. Default Value: "True"
Limitations
None
Authors
OTB-Team
See Also
Example of use
in: QB_Toulouse_Ortho_XS.tif
out: multiResolutionImage.tif
level: 1
sfactor: 2
vfactor: 0.6
fast: false
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/MultivariateAlterationDetector.html b/python/plugins/processing/otb/description/doc/MultivariateAlterationDetector.html
index 19caa09b6a91..9c403a2b5d34 100644
--- a/python/plugins/processing/otb/description/doc/MultivariateAlterationDetector.html
+++ b/python/plugins/processing/otb/description/doc/MultivariateAlterationDetector.html
@@ -1 +1,21 @@
-
Multivariate alteration detector
Brief Description
Multivariate Alteration Detector
Tags
Feature Extraction
Long Description
This application detects change between two given images.
Parameters
[param] Input Image 1 (-in1):
[param] Input Image 2 (-in2):
[param] Change Map (-out): Image of detected changes.
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
Limitations
None
Authors
OTB-Team
See Also
This filter implements the Multivariate Alteration Detector, based on the following work:
+ A. A. Nielsen and K. Conradsen, Multivariate alteration detection (mad) in multispectral, bi-temporal image data: a new approach to change detection studies, Remote Sens. Environ., vol. 64, pp. 1-19, (1998)
+
+ Multivariate Alteration Detector takes two images as inputs and produce a set of N change maps as a VectorImage (where N is the maximum of number of bands in first and second image) with the following properties:
+ - Change maps are differences of a pair of linear combinations of bands from image 1 and bands from image 2 chosen to maximize the correlation.
+ - Each change map is orthogonal to the others.
+
+ This is a statistical method which can handle different modalities and even different bands and number of bands between images.
+
+ If numbers of bands in image 1 and 2 are equal, then change maps are sorted by increasing correlation. If number of bands is different, the change maps are sorted by decreasing correlation.
+
+ The GetV1() and GetV2() methods allow to retrieve the linear combinations used to generate the Mad change maps as a vnl_matrix of double, and the GetRho() method allows to retrieve the correlation associated to each Mad change maps as a vnl_vector.
+
+ This filter has been implemented from the Matlab code kindly made available by the authors here:
+ http://www2.imm.dtu.dk/~aa/software.html
+
+ Both cases (same and different number of bands) have been validated by comparing the output image to the output produced by the Matlab code, and the reference images for testing have been generated from the Matlab code using Octave.
Example of use
in1: Spot5-Gloucester-before.tif
in2: Spot5-Gloucester-after.tif
out: detectedChangeImage.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/OSMDownloader.html b/python/plugins/processing/otb/description/doc/OSMDownloader.html
index c7699906778d..c8e91234efcd 100644
--- a/python/plugins/processing/otb/description/doc/OSMDownloader.html
+++ b/python/plugins/processing/otb/description/doc/OSMDownloader.html
@@ -1,2 +1,6 @@
-
Open Street Map layers importations applications
Brief Description
Generate a vector data from OSM on the input image extent
Tags
Image MetaData
Long Description
Generate a vector data from Open Street Map data. A DEM could be used. By default, the entire layer is downloaded; an image can be used as support for the OSM data. The application can also provide the available classes in layers. This application requires an Internet access. Information about the OSM project: http://www.openstreetmap.fr/
Parameters
[param] Output vector data (-out): Generated output vector data path
[param] Support image (-support): Image used as support to estimate the models
[param] OSM tag key (-key): OSM tag value to extract (motorway, footway...)
[param] OSM tag value (-value):
[param] DEM directory (-dem): Path to the directory that contains elevation information.
[param] option to display available key/value classes (-printclasses): Print the key/value classes available for the bounding box of the input image
- ** If not used : Note that the options OSMKey (-key) and Output (-out) become mandatory
Generate a vector data from OSM on the input image extent
Tags
Image MetaData
Long Description
Generate a vector data from Open Street Map data. A DEM could be used. By default, the entire layer is downloaded; an image can be used as support for the OSM data. The application can also provide the available classes in layers. This application requires an Internet access. Information about the OSM project: http://www.openstreetmap.fr/
[param] -support <string> Image used as support to estimate the models. Mandatory: True. Default Value: ""
[param] -key <string> OSM tag key to extract (highway, building...). Mandatory: True. Default Value: ""
[param] -value <string> OSM tag value to extract (motorway, footway...). Mandatory: False. Default Value: ""
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -printclasses <boolean> Print the key/value classes available for the bounding box of the input image
+ ** If not used : Note that the options OSMKey (-key) and Output (-out) become mandatory. Mandatory: False. Default Value: "True"
Limitations
None
Authors
OTB-Team
See Also
Conversion
Example of use
support: qb_RoadExtract.tif
key: highway
out: apTvUtOSMDownloader.shp
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ObtainUTMZoneFromGeoPoint.html b/python/plugins/processing/otb/description/doc/ObtainUTMZoneFromGeoPoint.html
new file mode 100644
index 000000000000..fba3ce365548
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/ObtainUTMZoneFromGeoPoint.html
@@ -0,0 +1,5 @@
+
+
+
ObtainUTMZoneFromGeoPoint
Brief Description
UTM zone determination from a geographic point.
Tags
Coordinates
Long Description
This application returns the UTM zone of an input geographic point.
Parameters
[param] -lat <float> Latitude value of desired point.. Mandatory: True. Default Value: "0.0"
[param] -lon <float> Longitude value of desired point.. Mandatory: True. Default Value: "0.0"
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/OpticalCalibration.html b/python/plugins/processing/otb/description/doc/OpticalCalibration.html
index bbd7a1575580..808dc9ce4465 100644
--- a/python/plugins/processing/otb/description/doc/OpticalCalibration.html
+++ b/python/plugins/processing/otb/description/doc/OpticalCalibration.html
@@ -1,6 +1,10 @@
-
Optical calibration application
Brief Description
Perform optical calibration TOA/TOC (Top Of Atmosphere/Top Of Canopy). Supported sensors: QuickBird, Ikonos, WorldView2, Formosat, Spot5
Tags
Calibration
Long Description
The application allows to convert pixel values from DN (for Digital Numbers) to physically interpretable and comparable values.Calibrated values are called surface reflectivity and its values lie in the range [0, 1].
+
+
+
OpticalCalibration
Brief Description
Perform optical calibration TOA/TOC (Top Of Atmosphere/Top Of Canopy). Supported sensors: QuickBird, Ikonos, WorldView2, Formosat, Spot5, Pleiades
Tags
Calibration
Long Description
The application allows to convert pixel values from DN (for Digital Numbers) to physically interpretable and comparable values. Calibrated values are called surface reflectivity and its values lie in the range [0, 1].
The first level is called Top Of Atmosphere (TOA) reflectivity. It takes into account the sensor gain, sensor spectral response and the solar illumination.
The second level is called Top Of Canopy (TOC) reflectivity. In addition to sensor gain and solar illumination, it takes into account the optical thickness of the atmosphere, the atmospheric pressure, the water vapor amount, the ozone amount, as well as the composition and amount of aerosol gasses.
-It is also possible to indicate an AERONET file which contains atmospheric parameters (version 1 and version 2 of Aeronet file are supported.
[param] Convert to milli reflectance (-milli): Output milli-reflectance instead of reflectance.
-This allows to put save the image in integer pixel type (in the range [0, 1000] instead of floating point in the range [0, 1].
[param] Relative Spectral Response File (-rsr): Sensor relative spectral response file
-By default the application gets these informations in the metadata
[group] Atmospheric parameters (-atmo): This group allows to set the atmospheric parameters.
otbcli_OpticalCalibration -in WV2_MUL_ROI_1000_100.tif -out OpticalCalibration.tif -level toa
\ No newline at end of file
+It is also possible to indicate an AERONET file which contains atmospheric parameters (version 1 and version 2 of Aeronet files are supported).
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[param] -milli <boolean> Flag to use milli-reflectance instead of reflectance.
+This allows to save the image with integer pixel type (in the range [0, 1000] instead of floating point in the range [0, 1]. In order to do that, use this option and set the output pixel type (-out filename uint16 for example). Mandatory: False. Default Value: "True"
[param] -clamp <boolean> Clamping in the range [0, 100]. It can be useful to preserve area with specular reflectance.. Mandatory: False. Default Value: "True"
[param] -rsr <string> Sensor relative spectral response file
+By default the application gets these informations in the metadata. Mandatory: False. Default Value: ""
[param] -atmo <string> This group allows to set the atmospheric parameters.. Mandatory: True. Default Value: "0"
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/OrthoRectification-epsg.html b/python/plugins/processing/otb/description/doc/OrthoRectification-epsg.html
new file mode 100644
index 000000000000..b9540dfdda96
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/OrthoRectification-epsg.html
@@ -0,0 +1,7 @@
+
+
+
OrthoRectification
Brief Description
This application allows to ortho-rectify optical images from supported sensors.
+
Tags
Geometry
Long Description
An inverse sensor model is built from the input image metadata to convert geographical to raw geometry coordinates. This inverse sensor model is then combined with the chosen map projection to build a global coordinate mapping grid. Last, this grid is used to resample using the chosen interpolation algorithm. A Digital Elevation Model can be specified to account for terrain deformations.
+In case of SPOT5 images, the sensor model can be approximated by an RPC model in order to speed-up computation.
Parameters
[param] -io <string> This group of parameters allows to set the input and output images.. Mandatory: True. Default Value: "0"
[param] -outputs <string> This group of parameters allows to define the grid on which the input image will be resampled.. Mandatory: True. Default Value: "0"
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -opt <string> This group of parameters allows to optimize processing time.. Mandatory: True. Default Value: "0"
[choice] -map Parameters of the output map projection to be used. utm,lambert2,lambert93,wgs,epsg. Mandatory: True. Default Value: "utm"
[group] -utm
[param] -map.utm.zone <int32> The zone number ranges from 1 to 60 and allows to define the transverse mercator projection (along with the hemisphere). Mandatory: True. Default Value: "31"
[param] -map.utm.northhem <boolean> The transverse mercator projections are defined by their zone number as well as the hemisphere. Activate this parameter if your image is in the northern hemisphere.. Mandatory: False. Default Value: "True"
[group] -lambert2
[group] -lambert93
[group] -wgs
[group] -epsg
[param] -map.epsg.code <int32> See www.spatialreference.org to find which EPSG code is associated to your projection. Mandatory: True. Default Value: "4326"
[choice] -interpolator This group of parameters allows to define how the input image will be interpolated during resampling. bco,nn,linear. Mandatory: True. Default Value: "bco"
[group] -bco
[param] -interpolator.bco.radius <int32> This parameter allows to control the size of the bicubic interpolation filter. If the target pixel size is higher than the input pixel size, increasing this parameter will reduce aliasing artefacts.. Mandatory: True. Default Value: "2"
Ortho-rectification chapter from the OTB Software Guide
Example of use
io.in: QB_TOULOUSE_MUL_Extract_500_500.tif
io.out: QB_Toulouse_ortho.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/OrthoRectification-fit-to-ortho.html b/python/plugins/processing/otb/description/doc/OrthoRectification-fit-to-ortho.html
new file mode 100644
index 000000000000..b9540dfdda96
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/OrthoRectification-fit-to-ortho.html
@@ -0,0 +1,7 @@
+
+
+
OrthoRectification
Brief Description
This application allows to ortho-rectify optical images from supported sensors.
+
Tags
Geometry
Long Description
An inverse sensor model is built from the input image metadata to convert geographical to raw geometry coordinates. This inverse sensor model is then combined with the chosen map projection to build a global coordinate mapping grid. Last, this grid is used to resample using the chosen interpolation algorithm. A Digital Elevation Model can be specified to account for terrain deformations.
+In case of SPOT5 images, the sensor model can be approximated by an RPC model in order to speed-up computation.
Parameters
[param] -io <string> This group of parameters allows to set the input and output images.. Mandatory: True. Default Value: "0"
[param] -outputs <string> This group of parameters allows to define the grid on which the input image will be resampled.. Mandatory: True. Default Value: "0"
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -opt <string> This group of parameters allows to optimize processing time.. Mandatory: True. Default Value: "0"
[choice] -map Parameters of the output map projection to be used. utm,lambert2,lambert93,wgs,epsg. Mandatory: True. Default Value: "utm"
[group] -utm
[param] -map.utm.zone <int32> The zone number ranges from 1 to 60 and allows to define the transverse mercator projection (along with the hemisphere). Mandatory: True. Default Value: "31"
[param] -map.utm.northhem <boolean> The transverse mercator projections are defined by their zone number as well as the hemisphere. Activate this parameter if your image is in the northern hemisphere.. Mandatory: False. Default Value: "True"
[group] -lambert2
[group] -lambert93
[group] -wgs
[group] -epsg
[param] -map.epsg.code <int32> See www.spatialreference.org to find which EPSG code is associated to your projection. Mandatory: True. Default Value: "4326"
[choice] -interpolator This group of parameters allows to define how the input image will be interpolated during resampling. bco,nn,linear. Mandatory: True. Default Value: "bco"
[group] -bco
[param] -interpolator.bco.radius <int32> This parameter allows to control the size of the bicubic interpolation filter. If the target pixel size is higher than the input pixel size, increasing this parameter will reduce aliasing artefacts.. Mandatory: True. Default Value: "2"
Ortho-rectification chapter from the OTB Software Guide
Example of use
io.in: QB_TOULOUSE_MUL_Extract_500_500.tif
io.out: QB_Toulouse_ortho.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/OrthoRectification-lambert-WGS84.html b/python/plugins/processing/otb/description/doc/OrthoRectification-lambert-WGS84.html
new file mode 100644
index 000000000000..b9540dfdda96
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/OrthoRectification-lambert-WGS84.html
@@ -0,0 +1,7 @@
+
+
+
OrthoRectification
Brief Description
This application allows to ortho-rectify optical images from supported sensors.
+
Tags
Geometry
Long Description
An inverse sensor model is built from the input image metadata to convert geographical to raw geometry coordinates. This inverse sensor model is then combined with the chosen map projection to build a global coordinate mapping grid. Last, this grid is used to resample using the chosen interpolation algorithm. A Digital Elevation Model can be specified to account for terrain deformations.
+In case of SPOT5 images, the sensor model can be approximated by an RPC model in order to speed-up computation.
Parameters
[param] -io <string> This group of parameters allows to set the input and output images.. Mandatory: True. Default Value: "0"
[param] -outputs <string> This group of parameters allows to define the grid on which the input image will be resampled.. Mandatory: True. Default Value: "0"
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -opt <string> This group of parameters allows to optimize processing time.. Mandatory: True. Default Value: "0"
[choice] -map Parameters of the output map projection to be used. utm,lambert2,lambert93,wgs,epsg. Mandatory: True. Default Value: "utm"
[group] -utm
[param] -map.utm.zone <int32> The zone number ranges from 1 to 60 and allows to define the transverse mercator projection (along with the hemisphere). Mandatory: True. Default Value: "31"
[param] -map.utm.northhem <boolean> The transverse mercator projections are defined by their zone number as well as the hemisphere. Activate this parameter if your image is in the northern hemisphere.. Mandatory: False. Default Value: "True"
[group] -lambert2
[group] -lambert93
[group] -wgs
[group] -epsg
[param] -map.epsg.code <int32> See www.spatialreference.org to find which EPSG code is associated to your projection. Mandatory: True. Default Value: "4326"
[choice] -interpolator This group of parameters allows to define how the input image will be interpolated during resampling. bco,nn,linear. Mandatory: True. Default Value: "bco"
[group] -bco
[param] -interpolator.bco.radius <int32> This parameter allows to control the size of the bicubic interpolation filter. If the target pixel size is higher than the input pixel size, increasing this parameter will reduce aliasing artefacts.. Mandatory: True. Default Value: "2"
Ortho-rectification chapter from the OTB Software Guide
Example of use
io.in: QB_TOULOUSE_MUL_Extract_500_500.tif
io.out: QB_Toulouse_ortho.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/OrthoRectification-utm.html b/python/plugins/processing/otb/description/doc/OrthoRectification-utm.html
new file mode 100644
index 000000000000..b9540dfdda96
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/OrthoRectification-utm.html
@@ -0,0 +1,7 @@
+
+
+
OrthoRectification
Brief Description
This application allows to ortho-rectify optical images from supported sensors.
+
Tags
Geometry
Long Description
An inverse sensor model is built from the input image metadata to convert geographical to raw geometry coordinates. This inverse sensor model is then combined with the chosen map projection to build a global coordinate mapping grid. Last, this grid is used to resample using the chosen interpolation algorithm. A Digital Elevation Model can be specified to account for terrain deformations.
+In case of SPOT5 images, the sensor model can be approximated by an RPC model in order to speed-up computation.
Parameters
[param] -io <string> This group of parameters allows to set the input and output images.. Mandatory: True. Default Value: "0"
[param] -outputs <string> This group of parameters allows to define the grid on which the input image will be resampled.. Mandatory: True. Default Value: "0"
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -opt <string> This group of parameters allows to optimize processing time.. Mandatory: True. Default Value: "0"
[choice] -map Parameters of the output map projection to be used. utm,lambert2,lambert93,wgs,epsg. Mandatory: True. Default Value: "utm"
[group] -utm
[param] -map.utm.zone <int32> The zone number ranges from 1 to 60 and allows to define the transverse mercator projection (along with the hemisphere). Mandatory: True. Default Value: "31"
[param] -map.utm.northhem <boolean> The transverse mercator projections are defined by their zone number as well as the hemisphere. Activate this parameter if your image is in the northern hemisphere.. Mandatory: False. Default Value: "True"
[group] -lambert2
[group] -lambert93
[group] -wgs
[group] -epsg
[param] -map.epsg.code <int32> See www.spatialreference.org to find which EPSG code is associated to your projection. Mandatory: True. Default Value: "4326"
[choice] -interpolator This group of parameters allows to define how the input image will be interpolated during resampling. bco,nn,linear. Mandatory: True. Default Value: "bco"
[group] -bco
[param] -interpolator.bco.radius <int32> This parameter allows to control the size of the bicubic interpolation filter. If the target pixel size is higher than the input pixel size, increasing this parameter will reduce aliasing artefacts.. Mandatory: True. Default Value: "2"
Ortho-rectification chapter from the OTB Software Guide
Example of use
io.in: QB_TOULOUSE_MUL_Extract_500_500.tif
io.out: QB_Toulouse_ortho.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/OrthoRectification.html b/python/plugins/processing/otb/description/doc/OrthoRectification.html
index 153ba595ac8c..b9540dfdda96 100644
--- a/python/plugins/processing/otb/description/doc/OrthoRectification.html
+++ b/python/plugins/processing/otb/description/doc/OrthoRectification.html
@@ -1,3 +1,7 @@
-
Ortho-rectification application
Brief Description
This application allows to ortho-rectify optical images from supported sensors.
-
Tags
Image Manipulation, Geometry
Long Description
An inverse sensor model is built from the input image metadata to convert geographical to raw geometry coordinates. This inverse sensor model is then combined with the chosen map projection to build a global coordinate mapping grid. Last, this grid is used to resample using the chosen interpolation algorithm. A Digital Elevation Model can be specified to account for terrain deformations.
-In case of SPOT5 images, the sensor model can be approximated by an RPC model in order to speed-up computation.
Parameters
[group] Input and output data (-io): This group of parameters allows to set the input and output images.
[param] Input Image (-in): The input image to ortho-rectify
[param] Output Image (-out): The ortho-rectified output image
[choice] Output Map Projection (-map): Parameters of the ouptut map projection.
[group] Universal Trans-Mercator (UTM): A system of transverse mercator projections dividing the surface of Earth between 80S and 84N latitude.
[param] Zone number (-zone): The zone number ranges from 1 to 60 and allows to define the transverse mercator projection (along with the hemisphere)
[param] Northern Hemisphere (-hem): The transverse mercator projections are defined by their zone number as well as the hemisphere. Activate this parameter if your image is in the northern hemisphere.
[group] Lambert II Etendu: This is a Lambert Conformal Conic projection mainly used in France.
[group] EPSG Code: This code is a generic way of identifying map projections, and allows to specify a large amount of them. See www.spatialreference.org to find which EPSG code is associated to your projection;
[param] EPSG Code (-code): See www.spatialreference.org to find which EPSG code is associated to your projection
[group] Output Image Grid (-outputs): This group of parameters allows to define the grid on which the input image will be resampled.
[choice] Parameters estimation modes (-mode):
[group] User Defined: This mode allows you to fully modify default values.
[group] Automatic Size from Spacing: This mode allows you to automatically compute the optimal image size from given spacing (pixel size) values
[group] Automatic Spacing from Size: This mode allows you to automatically compute the optimal image spacing (pixel size) from the given size
[param] Upper Left X (-ulx): Cartographic X coordinate of upper-left corner (meters for cartographic projections, degrees for geographic ones)
[param] Upper Left Y (-uly): Cartographic Y coordinate of the upper-left corner (meters for cartographic projections, degrees for geographic ones)
[param] Size X (-sizex): Size of projected image along X (in pixels)
[param] Size Y (-sizey): Size of projected image along Y (in pixels)
[param] Pixel Size X (-spacingx): Size of each pixel along X axis (meters for cartographic projections, degrees for geographic ones)
[param] Pixel Size Y (-spacingy): Size of each pixel along Y axis (meters for cartographic projections, degrees for geographic ones)
[param] Force isotropic spacing by default (-isotropic): Default spacing (pixel size) values are estimated from the sensor modeling of the image. It can therefore result in a non-isotropic spacing. This option allows you to force default values to be isotropic (in this case, the minimum of spacing in both direction is applied. Values overriden by user are not affected by this option.
[group] Elevation management (-elev): This group of parameters allows to manage elevation values in the ortho-rectification process.
[param] DEM directory (-dem): This parameter allows to select a directory containing Digital Elevation Model tiles. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application
[choice] Interpolation (-interpolator): This group of parameters allows to define how the input image will be interpolated during resampling.
[group] Nearest Neighbor interpolation: Nearest neighbor interpolation leads to poor image quality, but it is very fast.
[group] Linear interpolation: Linear interpolation leads to average image quality but is quite fast
[group] Bicubic interpolation:
[param] Radius for bicubic interpolation (-radius): This parameter allows to control the size of the bicubic interpolation filter. If the target pixel size is higher than the input pixel size, increasing this parameter will reduce aliasing artefacts.
[group] Speed optimization parameters (-opt): This group of parameters allows to optimize processing time.
[param] RPC modeling (points per axis) (-rpc): Enabling RPC modeling allows to speed-up SPOT5 ortho-rectification. Value is the number of control points per axis for RPC estimation
[param] Available memory for processing (in MB) (-ram): This allows to set the maximum amount of RAM available for processing. As the writing task is time consuming, it is better to write large pieces of data, which can be achieved by increasing this parameter (pay attention to your system capabilities)
[param] Resampling grid spacing (-gridspacing): Resampling is done according to a coordinate mapping grid, whose pixel size is set by this parameter. The closer to the output spacing this parameter is, the more precise will be the ortho-rectified image, but increasing this parameter allows to reduce processing time.
Limitations
Supported sensors are SPOT5 (TIF format), Ikonos, Quickbird, Worldview2, GeoEye.
Authors
OTB-Team
See also
Ortho-rectification chapter from the OTB Software Guide
Example of use
Parameters to set value:
Command line to execute:
otbcli_OrthoRectification
\ No newline at end of file
+
+
+
OrthoRectification
Brief Description
This application allows to ortho-rectify optical images from supported sensors.
+
Tags
Geometry
Long Description
An inverse sensor model is built from the input image metadata to convert geographical to raw geometry coordinates. This inverse sensor model is then combined with the chosen map projection to build a global coordinate mapping grid. Last, this grid is used to resample using the chosen interpolation algorithm. A Digital Elevation Model can be specified to account for terrain deformations.
+In case of SPOT5 images, the sensor model can be approximated by an RPC model in order to speed-up computation.
Parameters
[param] -io <string> This group of parameters allows to set the input and output images.. Mandatory: True. Default Value: "0"
[param] -outputs <string> This group of parameters allows to define the grid on which the input image will be resampled.. Mandatory: True. Default Value: "0"
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -opt <string> This group of parameters allows to optimize processing time.. Mandatory: True. Default Value: "0"
[choice] -map Parameters of the output map projection to be used. utm,lambert2,lambert93,wgs,epsg. Mandatory: True. Default Value: "utm"
[group] -utm
[param] -map.utm.zone <int32> The zone number ranges from 1 to 60 and allows to define the transverse mercator projection (along with the hemisphere). Mandatory: True. Default Value: "31"
[param] -map.utm.northhem <boolean> The transverse mercator projections are defined by their zone number as well as the hemisphere. Activate this parameter if your image is in the northern hemisphere.. Mandatory: False. Default Value: "True"
[group] -lambert2
[group] -lambert93
[group] -wgs
[group] -epsg
[param] -map.epsg.code <int32> See www.spatialreference.org to find which EPSG code is associated to your projection. Mandatory: True. Default Value: "4326"
[choice] -interpolator This group of parameters allows to define how the input image will be interpolated during resampling. bco,nn,linear. Mandatory: True. Default Value: "bco"
[group] -bco
[param] -interpolator.bco.radius <int32> This parameter allows to control the size of the bicubic interpolation filter. If the target pixel size is higher than the input pixel size, increasing this parameter will reduce aliasing artefacts.. Mandatory: True. Default Value: "2"
Ortho-rectification chapter from the OTB Software Guide
Example of use
io.in: QB_TOULOUSE_MUL_Extract_500_500.tif
io.out: QB_Toulouse_ortho.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/Pansharpening-bayes.html b/python/plugins/processing/otb/description/doc/Pansharpening-bayes.html
new file mode 100644
index 000000000000..414545c6e5bc
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/Pansharpening-bayes.html
@@ -0,0 +1,5 @@
+
+
+
Pansharpening
Brief Description
Perform P+XS pansharpening
Tags
Geometry,Pansharpening
Long Description
This application performs P+XS pansharpening. Pansharpening is a process of merging high-resolution panchromatic and lower resolution multispectral imagery to create a single high-resolution color image. Algorithms available in the applications are: RCS, bayesian fusion and Local Mean and Variance Matching(LMVM).
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -method Selection of the pan-sharpening method. rcs,lmvm,bayes. Mandatory: True. Default Value: "rcs"
[group] -rcs
[group] -lmvm
[param] -method.lmvm.radiusx <int32> Set the x radius of the sliding window.. Mandatory: True. Default Value: "3"
[param] -method.lmvm.radiusy <int32> Set the y radius of the sliding window.. Mandatory: True. Default Value: "3"
[group] -bayes
[param] -method.bayes.lambda <float> Set the weighting value.. Mandatory: True. Default Value: "0.9999"
[param] -method.bayes.s <float> Set the S coefficient.. Mandatory: True. Default Value: "1"
Limitations
None
Authors
OTB-Team
See Also
Example of use
inp: QB_Toulouse_Ortho_PAN.tif
inxs: QB_Toulouse_Ortho_XS.tif
out: Pansharpening.tif uint16
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/Pansharpening-lmvm.html b/python/plugins/processing/otb/description/doc/Pansharpening-lmvm.html
new file mode 100644
index 000000000000..414545c6e5bc
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/Pansharpening-lmvm.html
@@ -0,0 +1,5 @@
+
+
+
Pansharpening
Brief Description
Perform P+XS pansharpening
Tags
Geometry,Pansharpening
Long Description
This application performs P+XS pansharpening. Pansharpening is a process of merging high-resolution panchromatic and lower resolution multispectral imagery to create a single high-resolution color image. Algorithms available in the applications are: RCS, bayesian fusion and Local Mean and Variance Matching(LMVM).
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -method Selection of the pan-sharpening method. rcs,lmvm,bayes. Mandatory: True. Default Value: "rcs"
[group] -rcs
[group] -lmvm
[param] -method.lmvm.radiusx <int32> Set the x radius of the sliding window.. Mandatory: True. Default Value: "3"
[param] -method.lmvm.radiusy <int32> Set the y radius of the sliding window.. Mandatory: True. Default Value: "3"
[group] -bayes
[param] -method.bayes.lambda <float> Set the weighting value.. Mandatory: True. Default Value: "0.9999"
[param] -method.bayes.s <float> Set the S coefficient.. Mandatory: True. Default Value: "1"
Limitations
None
Authors
OTB-Team
See Also
Example of use
inp: QB_Toulouse_Ortho_PAN.tif
inxs: QB_Toulouse_Ortho_XS.tif
out: Pansharpening.tif uint16
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/Pansharpening-rcs.html b/python/plugins/processing/otb/description/doc/Pansharpening-rcs.html
new file mode 100644
index 000000000000..414545c6e5bc
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/Pansharpening-rcs.html
@@ -0,0 +1,5 @@
+
+
+
Pansharpening
Brief Description
Perform P+XS pansharpening
Tags
Geometry,Pansharpening
Long Description
This application performs P+XS pansharpening. Pansharpening is a process of merging high-resolution panchromatic and lower resolution multispectral imagery to create a single high-resolution color image. Algorithms available in the applications are: RCS, bayesian fusion and Local Mean and Variance Matching(LMVM).
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -method Selection of the pan-sharpening method. rcs,lmvm,bayes. Mandatory: True. Default Value: "rcs"
[group] -rcs
[group] -lmvm
[param] -method.lmvm.radiusx <int32> Set the x radius of the sliding window.. Mandatory: True. Default Value: "3"
[param] -method.lmvm.radiusy <int32> Set the y radius of the sliding window.. Mandatory: True. Default Value: "3"
[group] -bayes
[param] -method.bayes.lambda <float> Set the weighting value.. Mandatory: True. Default Value: "0.9999"
[param] -method.bayes.s <float> Set the S coefficient.. Mandatory: True. Default Value: "1"
Limitations
None
Authors
OTB-Team
See Also
Example of use
inp: QB_Toulouse_Ortho_PAN.tif
inxs: QB_Toulouse_Ortho_XS.tif
out: Pansharpening.tif uint16
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/Pansharpening.html b/python/plugins/processing/otb/description/doc/Pansharpening.html
new file mode 100644
index 000000000000..414545c6e5bc
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/Pansharpening.html
@@ -0,0 +1,5 @@
+
+
+
Pansharpening
Brief Description
Perform P+XS pansharpening
Tags
Geometry,Pansharpening
Long Description
This application performs P+XS pansharpening. Pansharpening is a process of merging high-resolution panchromatic and lower resolution multispectral imagery to create a single high-resolution color image. Algorithms available in the applications are: RCS, bayesian fusion and Local Mean and Variance Matching(LMVM).
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -method Selection of the pan-sharpening method. rcs,lmvm,bayes. Mandatory: True. Default Value: "rcs"
[group] -rcs
[group] -lmvm
[param] -method.lmvm.radiusx <int32> Set the x radius of the sliding window.. Mandatory: True. Default Value: "3"
[param] -method.lmvm.radiusy <int32> Set the y radius of the sliding window.. Mandatory: True. Default Value: "3"
[group] -bayes
[param] -method.bayes.lambda <float> Set the weighting value.. Mandatory: True. Default Value: "0.9999"
[param] -method.bayes.s <float> Set the S coefficient.. Mandatory: True. Default Value: "1"
Limitations
None
Authors
OTB-Team
See Also
Example of use
inp: QB_Toulouse_Ortho_PAN.tif
inxs: QB_Toulouse_Ortho_XS.tif
out: Pansharpening.tif uint16
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/PixelValue.html b/python/plugins/processing/otb/description/doc/PixelValue.html
new file mode 100644
index 000000000000..ada52dc12bd5
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/PixelValue.html
@@ -0,0 +1,6 @@
+
+
+
PixelValue
Brief Description
Get the value of a pixel.
Tags
Utilities,Coordinates,Raster
Long Description
Get the value of a pixel.
+Pay attention, index starts at 0.
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/Quicklook.html b/python/plugins/processing/otb/description/doc/Quicklook.html
index 932956154e03..7eecc2c879f4 100644
--- a/python/plugins/processing/otb/description/doc/Quicklook.html
+++ b/python/plugins/processing/otb/description/doc/Quicklook.html
@@ -1,2 +1,7 @@
-
Quick Look Application
Brief Description
Generates a subsampled version of an image extract
Tags
Image Manipulation
Long Description
Generates a subsampled version of an extract of an image defined by ROIStart and ROISize.
- This extract is subsampled using the ration OR the output image Size
Parameters
[param] Input Image (-in): The image to read
[param] Output Image (-out): The subsampled image.
[param] Channel List (-cl): Selected channels
[param] ROI Origin X (-rox): first point of ROI in x-direction
[param] ROI Origin Y (-roy): first point of ROI in y-direction
[param] ROI Size X (-rsx): size of ROI in x-direction
[param] ROI Size Y (-rsy): size of ROI in y-direction
[param] Sampling ratio (-sr): Sampling Ratio, default is 2
[param] Size X (-sx): quicklook size in x-direction (used if no sampling ration is given)
[param] Size Y (-sy): quicklook size in y-direction (used if no sampling ration is given)
Generates a subsampled version of an image extract
Tags
Image Manipulation
Long Description
Generates a subsampled version of an extract of an image defined by ROIStart and ROISize.
+ This extract is subsampled using the ratio OR the output image Size.
Parameters
[param] -in <string> The image to read. Mandatory: True. Default Value: ""
This application does not provide yet the optimal way to decode coarser level of resolution from JPEG2000 images (like in Monteverdi).
+Trying to subsample a huge JPEG2000 image with the application will lead to poor performance for now.
Authors
OTB-Team
See Also
Example of use
in: qb_RoadExtract.tif
out: quicklookImage.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/RadiometricIndices.html b/python/plugins/processing/otb/description/doc/RadiometricIndices.html
new file mode 100644
index 000000000000..c05f1b956497
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/RadiometricIndices.html
@@ -0,0 +1,25 @@
+
+
+
RadiometricIndices
Brief Description
Compute radiometric indices.
Tags
Radiometric Indices,Feature Extraction
Long Description
This application computes radiometric indices using the relevant channels of the input image. The output is a multi band image into which each channel is one of the selected indices.
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/Rasterization.html b/python/plugins/processing/otb/description/doc/Rasterization.html
index b50aba4351bc..ece033d5b68e 100644
--- a/python/plugins/processing/otb/description/doc/Rasterization.html
+++ b/python/plugins/processing/otb/description/doc/Rasterization.html
@@ -1 +1,6 @@
-
Rasterization Application
Brief Description
Reproject and Rasterize a Vector Data.
Tags
Vector data Manipulation
Long Description
Reproject and Rasterize a Vector Data.
Parameters
[param] InputVectorData (-in): The input vector data to be rasterized
[param] OutputImage (-out): An output image containing the rasterized vector data
[param] InputImage (-im): A reference image for extraction region and projection information (optional)
[param] SizeX (-szx): OutputSize[0] (useless if support image is given)
[param] SizeY (-szy): OutputSize[1] (useless if support image is given)
[param] RSID (-epsg): Projection System RSID number (RSID 4326 for WGS84 32631 for UTM31N) (useless if support image is given)
[param] OriginX (-orx): OutputOrigin[0] (useless if support image is given)
[param] OriginY (-ory): OutputOrigin[1] (useless if support image is given)
[param] SpacingX (-spx): OutputSpacing[0] (useless if support image is given)
[param] SpacingY (-spy): OutputSpacing[1] (useless if support image is given)
This application allows to reproject and rasterize a vector dataset. The grid of the rasterized output can be set by using a reference image, or by setting all parameters (origin, size, spacing) by hand. In the latter case, at least the spacing (ground sampling distance) is needed (other parameters are computed automatically). The rasterized output can also be in a different projection reference system than the input dataset.
+ There are two rasterize modes available in the application. The first is the binary mode: it allows to render all pixels belonging to a geometry of the input dataset in the foreground color, while rendering the other in background color. The second one allows to render pixels belonging to a geometry with respect to an attribute of this geometry. The field of the attribute to render can be set by the user. In the second mode, the background value is still used for unassociated pixels.
Parameters
[param] -in <string> The input vector dataset to be rasterized. Mandatory: True. Default Value: ""
[param] -out <string> An output image containing the rasterized vector dataset. Mandatory: True. Default Value: ""
[param] -im <string> A reference image from which to import output grid and projection reference system information.. Mandatory: False. Default Value: ""
[param] -szx <int32> Output size along x axis (useless if support image is given). Mandatory: False. Default Value: "0"
[param] -szy <int32> Output size along y axis (useless if support image is given). Mandatory: False. Default Value: "0"
[param] -epsg <int32> EPSG code for the output projection reference system (EPSG 4326 for WGS84, 32631 for UTM31N...,useless if support image is given). Mandatory: False. Default Value: "0"
[param] -orx <float> Output upper-left x coordinate (useless if support image is given). Mandatory: False. Default Value: "0.0"
[param] -ory <float> Output upper-left y coordinate (useless if support image is given). Mandatory: False. Default Value: "0.0"
[param] -spx <float> Spacing (ground sampling distance) along x axis (useless if support image is given). Mandatory: False. Default Value: "0.0"
[param] -spy <float> Spacing (ground sampling distance) along y axis (useless if support image is given). Mandatory: False. Default Value: "0.0"
[param] -background <float> Default value for pixels not belonging to any geometry. Mandatory: True. Default Value: "0"
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[param] -mode.binary.foreground <float> Value for pixels inside a geometry. Mandatory: True. Default Value: "255"
[group] -attribute
[param] -mode.attribute.field <string> Name of the attribute field to burn. Mandatory: True. Default Value: "DN"
Limitations
None
Authors
OTB-Team
See Also
For now, support of input dataset with multiple layers having different projection reference system is limited.
Example of use
in: qb_RoadExtract_classification.shp
out: rasterImage.tif
spx: 1.
spy: 1.
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/ReadImageInfo.html b/python/plugins/processing/otb/description/doc/ReadImageInfo.html
index 5fbe804824f2..576123b7ae9e 100644
--- a/python/plugins/processing/otb/description/doc/ReadImageInfo.html
+++ b/python/plugins/processing/otb/description/doc/ReadImageInfo.html
@@ -1 +1,5 @@
-
Read image information Application
Brief Description
Get information about the image
Tags
Util, Image MetaData
Long Description
Display informations about the image in the logger tab.
Parameters
[param] Input Image (-in):
Limitations
None
Authors
OTB-Team
See also
Example of use
Parameters to set value:
Input Image: QB_Toulouse_Ortho_XS.tif
Command line to execute:
otbcli_ReadImageInfo -in QB_Toulouse_Ortho_XS.tif
\ No newline at end of file
+
+
+
ReadImageInfo
Brief Description
Get information about the image
Tags
Utilities,Image Manipulation,Image MetaData
Long Description
Display information about the input image like: image size, origin, spacing, metadata, projections...
[param] -keywordlist <boolean> Output the OSSIM keyword list. It contains metadata information (sensor model, geometry ). Informations are stored in keyword list (pairs of key/value). Mandatory: False. Default Value: "True"
[param] -outkwl <string> This option allows to extract the OSSIM keywordlist of the image into a geom file.. Mandatory: False. Default Value: ""
[param] -gcp <string> This group of parameters allows to access to the GCPs informations.. Mandatory: True. Default Value: "0"
Limitations
None
Authors
OTB-Team
See Also
Example of use
in: QB_Toulouse_Ortho_XS.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/RefineSensorModel.html b/python/plugins/processing/otb/description/doc/RefineSensorModel.html
new file mode 100644
index 000000000000..0376cade1342
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/RefineSensorModel.html
@@ -0,0 +1,5 @@
+
+
+
RefineSensorModel
Brief Description
Perform least-square fit of a sensor model to a set of tie points
Tags
Geometry
Long Description
This application reads a geom file containing a sensor model and a text file containing a list of ground control point, and performs a least-square fit of the sensor model adjustable parameters to these tie points. It produces an updated geom file as output, as well as an optional ground control points based statistics file and a vector file containing residues. The output geom file can then be used to ortho-rectify the data more accurately. Please note that for a proper use of the application, elevation must be correctly set (including DEM and geoid file). The map parameters allows to choose a map projection in which the accuracy will be estimated in meters.
Parameters
[param] -ingeom <string> Geom file containing the sensor model to refine. Mandatory: True. Default Value: ""
[param] -inpoints <string> Input file containing tie points. Points are stored in following format: row col lon lat. Line beginning with # are ignored.. Mandatory: True. Default Value: ""
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[choice] -map Parameters of the output map projection to be used. utm,lambert2,lambert93,wgs,epsg. Mandatory: True. Default Value: "utm"
[group] -utm
[param] -map.utm.zone <int32> The zone number ranges from 1 to 60 and allows to define the transverse mercator projection (along with the hemisphere). Mandatory: True. Default Value: "31"
[param] -map.utm.northhem <boolean> The transverse mercator projections are defined by their zone number as well as the hemisphere. Activate this parameter if your image is in the northern hemisphere.. Mandatory: False. Default Value: "True"
[group] -lambert2
[group] -lambert93
[group] -wgs
[group] -epsg
[param] -map.epsg.code <int32> See www.spatialreference.org to find which EPSG code is associated to your projection. Mandatory: True. Default Value: "4326"
Limitations
None
Authors
OTB-Team
See Also
OrthoRectification,HomologousPointsExtraction
Example of use
ingeom: input.geom
outgeom: output.geom
inpoints: points.txt
map: epsg
map.epsg.code: 32631
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/Rescale.html b/python/plugins/processing/otb/description/doc/Rescale.html
index d52ea82cec8c..54e0c26923a5 100644
--- a/python/plugins/processing/otb/description/doc/Rescale.html
+++ b/python/plugins/processing/otb/description/doc/Rescale.html
@@ -1 +1,5 @@
-
Rescale Image Application
Brief Description
Rescale the image between two given values.
Tags
Image Manipulation
Long Description
This application scale the given image pixel intensity between two given values. By default min (resp. max) value is set to 0 (resp. 255).
Parameters
[param] Input Image (-in): The image to scale.
[param] Output Image (-out): The rescaled image filename.
[param] Available RAM (-ram): Available RAM
[param] Output min value (-outmin): Minimum value of the output image.
[param] Output max value (-outmax): Maximum value of the output image.
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[param] -outmin <float> Minimum value of the output image.. Mandatory: False. Default Value: "0"
[param] -outmax <float> Maximum value of the output image.. Mandatory: False. Default Value: "255"
Limitations
None
Authors
OTB-Team
See Also
Example of use
in: QB_Toulouse_Ortho_PAN.tif
out: rescaledImage.png uchar
outmin: 0
outmax: 255
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/RigidTransformResample-id.html b/python/plugins/processing/otb/description/doc/RigidTransformResample-id.html
new file mode 100644
index 000000000000..241b4346d913
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/RigidTransformResample-id.html
@@ -0,0 +1,5 @@
+
+
+
RigidTransformResample
Brief Description
Resample an image with a rigid transform
Tags
Conversion,Geometry
Long Description
This application performs a parametric transform on the input image. Scaling, translation and rotation with scaling factor are handled. Parameters of the transform is expressed in physical units, thus particular attention must be paid on pixel size (value, and sign). Moreover transform is expressed from input space to output space (on the contrary ITK Transforms are expressed form output space to input space).
Parameters
[param] -in <string> The input image to translate.. Mandatory: True. Default Value: ""
[param] -transform <string> This group of parameters allows to set the transformation to apply.. Mandatory: True. Default Value: "0"
[param] -ram <int32> This allows to set the maximum amount of RAM available for processing. As the writing task is time consuming, it is better to write large pieces of data, which can be achieved by increasing this parameter (pay attention to your system capabilities). Mandatory: False. Default Value: "128"
[choice] -interpolator This group of parameters allows to define how the input image will be interpolated during resampling. nn,linear,bco. Mandatory: True. Default Value: "bco"
[group] -nn
[group] -linear
[group] -bco
[param] -interpolator.bco.radius <int32> This parameter allows to control the size of the bicubic interpolation filter. If the target pixel size is higher than the input pixel size, increasing this parameter will reduce aliasing artefacts.. Mandatory: True. Default Value: "2"
Limitations
None
Authors
OTB-Team
See Also
Translation
Example of use
in: qb_toulouse_sub.tif
out: rigitTransformImage.tif
transform.type: rotation
transform.type.rotation.angle: 20
transform.type.rotation.scalex: 2.
transform.type.rotation.scaley: 2.
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/RigidTransformResample-rotation.html b/python/plugins/processing/otb/description/doc/RigidTransformResample-rotation.html
new file mode 100644
index 000000000000..241b4346d913
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/RigidTransformResample-rotation.html
@@ -0,0 +1,5 @@
+
+
+
RigidTransformResample
Brief Description
Resample an image with a rigid transform
Tags
Conversion,Geometry
Long Description
This application performs a parametric transform on the input image. Scaling, translation and rotation with scaling factor are handled. Parameters of the transform is expressed in physical units, thus particular attention must be paid on pixel size (value, and sign). Moreover transform is expressed from input space to output space (on the contrary ITK Transforms are expressed form output space to input space).
Parameters
[param] -in <string> The input image to translate.. Mandatory: True. Default Value: ""
[param] -transform <string> This group of parameters allows to set the transformation to apply.. Mandatory: True. Default Value: "0"
[param] -ram <int32> This allows to set the maximum amount of RAM available for processing. As the writing task is time consuming, it is better to write large pieces of data, which can be achieved by increasing this parameter (pay attention to your system capabilities). Mandatory: False. Default Value: "128"
[choice] -interpolator This group of parameters allows to define how the input image will be interpolated during resampling. nn,linear,bco. Mandatory: True. Default Value: "bco"
[group] -nn
[group] -linear
[group] -bco
[param] -interpolator.bco.radius <int32> This parameter allows to control the size of the bicubic interpolation filter. If the target pixel size is higher than the input pixel size, increasing this parameter will reduce aliasing artefacts.. Mandatory: True. Default Value: "2"
Limitations
None
Authors
OTB-Team
See Also
Translation
Example of use
in: qb_toulouse_sub.tif
out: rigitTransformImage.tif
transform.type: rotation
transform.type.rotation.angle: 20
transform.type.rotation.scalex: 2.
transform.type.rotation.scaley: 2.
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/RigidTransformResample-translation.html b/python/plugins/processing/otb/description/doc/RigidTransformResample-translation.html
new file mode 100644
index 000000000000..241b4346d913
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/RigidTransformResample-translation.html
@@ -0,0 +1,5 @@
+
+
+
RigidTransformResample
Brief Description
Resample an image with a rigid transform
Tags
Conversion,Geometry
Long Description
This application performs a parametric transform on the input image. Scaling, translation and rotation with scaling factor are handled. Parameters of the transform is expressed in physical units, thus particular attention must be paid on pixel size (value, and sign). Moreover transform is expressed from input space to output space (on the contrary ITK Transforms are expressed form output space to input space).
Parameters
[param] -in <string> The input image to translate.. Mandatory: True. Default Value: ""
[param] -transform <string> This group of parameters allows to set the transformation to apply.. Mandatory: True. Default Value: "0"
[param] -ram <int32> This allows to set the maximum amount of RAM available for processing. As the writing task is time consuming, it is better to write large pieces of data, which can be achieved by increasing this parameter (pay attention to your system capabilities). Mandatory: False. Default Value: "128"
[choice] -interpolator This group of parameters allows to define how the input image will be interpolated during resampling. nn,linear,bco. Mandatory: True. Default Value: "bco"
[group] -nn
[group] -linear
[group] -bco
[param] -interpolator.bco.radius <int32> This parameter allows to control the size of the bicubic interpolation filter. If the target pixel size is higher than the input pixel size, increasing this parameter will reduce aliasing artefacts.. Mandatory: True. Default Value: "2"
Limitations
None
Authors
OTB-Team
See Also
Translation
Example of use
in: qb_toulouse_sub.tif
out: rigitTransformImage.tif
transform.type: rotation
transform.type.rotation.angle: 20
transform.type.rotation.scalex: 2.
transform.type.rotation.scaley: 2.
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/RigidTransformResample.html b/python/plugins/processing/otb/description/doc/RigidTransformResample.html
index 653de92c70ef..241b4346d913 100644
--- a/python/plugins/processing/otb/description/doc/RigidTransformResample.html
+++ b/python/plugins/processing/otb/description/doc/RigidTransformResample.html
@@ -1,2 +1,5 @@
-
Image resampling with a rigid transform
Brief Description
Resample an image with a rigid transform
Tags
Conversion, Geometry
Long Description
This application performs an translation on the input image.
- Parameters of the translation can be set with tx and ty options.
Parameters
[param] Input image (-in):
[param] Output image (-out):
[param] The X translation (in physical units) (-tx):
[param] The Y translation (in physical units) (-ty):
This application performs a parametric transform on the input image. Scaling, translation and rotation with scaling factor are handled. Parameters of the transform is expressed in physical units, thus particular attention must be paid on pixel size (value, and sign). Moreover transform is expressed from input space to output space (on the contrary ITK Transforms are expressed form output space to input space).
Parameters
[param] -in <string> The input image to translate.. Mandatory: True. Default Value: ""
[param] -transform <string> This group of parameters allows to set the transformation to apply.. Mandatory: True. Default Value: "0"
[param] -ram <int32> This allows to set the maximum amount of RAM available for processing. As the writing task is time consuming, it is better to write large pieces of data, which can be achieved by increasing this parameter (pay attention to your system capabilities). Mandatory: False. Default Value: "128"
[choice] -interpolator This group of parameters allows to define how the input image will be interpolated during resampling. nn,linear,bco. Mandatory: True. Default Value: "bco"
[group] -nn
[group] -linear
[group] -bco
[param] -interpolator.bco.radius <int32> This parameter allows to control the size of the bicubic interpolation filter. If the target pixel size is higher than the input pixel size, increasing this parameter will reduce aliasing artefacts.. Mandatory: True. Default Value: "2"
Limitations
None
Authors
OTB-Team
See Also
Translation
Example of use
in: qb_toulouse_sub.tif
out: rigitTransformImage.tif
transform.type: rotation
transform.type.rotation.angle: 20
transform.type.rotation.scalex: 2.
transform.type.rotation.scaley: 2.
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/SFSTextureExtraction.html b/python/plugins/processing/otb/description/doc/SFSTextureExtraction.html
new file mode 100644
index 000000000000..9d2f9ff24828
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/SFSTextureExtraction.html
@@ -0,0 +1,5 @@
+
+
+
SFSTextureExtraction
Brief Description
Computes Structural Feature Set textures on every pixel of the input image selected channel
Tags
Textures,Feature Extraction
Long Description
This application computes SFS textures on a mono band image
Parameters
[param] -in <string> The input image to compute the features on.. Mandatory: True. Default Value: ""
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[param] -parameters <string> This group of parameters allows to define SFS texture parameters. The available texture features are SFS'Length, SFS'Width, SFS'PSI, SFS'W-Mean, SFS'Ratio and SFS'SD. They are provided in this exact order in the output image.. Mandatory: True. Default Value: "0"
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/SOMClassification.html b/python/plugins/processing/otb/description/doc/SOMClassification.html
new file mode 100644
index 000000000000..e5a6742423ca
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/SOMClassification.html
@@ -0,0 +1,5 @@
+
+
+
[param] -out <string> Output classified image (each pixel contains the index of its corresponding vector in the SOM).. Mandatory: True. Default Value: ""
[param] -vm <string> Validity mask (only pixels corresponding to a mask value greater than 0 will be used for learning). Mandatory: False. Default Value: ""
[param] -tp <float> Probability for a sample to be selected in the training set. Mandatory: False. Default Value: "1"
[param] -ts <int32> Maximum training set size (in pixels). Mandatory: False. Default Value: "0"
[param] -sl <int32> Number of lines in each streaming block (used during data sampling). Mandatory: False. Default Value: "0"
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[param] -rand <int32> Set specific seed. with integer value.. Mandatory: False. Default Value: "0"
Limitations
None
Authors
OTB-Team
See Also
Example of use
in: QB_1_ortho.tif
out: SOMClassification.tif
tp: 1.0
ts: 16384
sl: 32
sx: 32
sy: 32
nx: 10
ny: 10
ni: 5
bi: 1.0
bf: 0.1
iv: 0
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/SarRadiometricCalibration.html b/python/plugins/processing/otb/description/doc/SarRadiometricCalibration.html
index b8d4e5ad1ab0..718529a9d725 100644
--- a/python/plugins/processing/otb/description/doc/SarRadiometricCalibration.html
+++ b/python/plugins/processing/otb/description/doc/SarRadiometricCalibration.html
@@ -1 +1,5 @@
-
SAR Radiometric calibration application
Brief Description
Perform SAR calibration on input complex images
Tags
Calibration, SAR
Long Description
This application performs SAR calibration on input complex images.
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[param] -noise <boolean> Flag to disable noise. Mandatory: False. Default Value: "True"
Limitations
None
Authors
OTB-Team
See Also
Example of use
in: RSAT_imagery_HH.tif
out: SarRadiometricCalibration.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/Segmentation-cc.html b/python/plugins/processing/otb/description/doc/Segmentation-cc.html
new file mode 100644
index 000000000000..7fb053cc2c7c
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/Segmentation-cc.html
@@ -0,0 +1,9 @@
+
+
+
Segmentation
Brief Description
Performs segmentation of an image, and outputs either a raster or a vector file. In vector mode, large input datasets are supported.
Tags
Segmentation
Long Description
This application allows performing various segmentation algorithms on a multispectral image. Available segmentation algorithms are two different versions of the Mean-Shift segmentation algorithm (one being multi-threaded), simple pixel-based connected components according to a user-defined criterion, and watershed from the gradient of the intensity (norm of the spectral bands vector). The application has two different modes that affect the nature of its output.
+
+In raster mode, the output of the application is a classical image of unique labels identifying the segmented regions. The labeled output can be passed to the ColorMapping application to render regions with contrasted colours. Please note that this mode loads the whole input image into memory, and as such can not handle large images.
+
+To segment large data, one can use the vector mode. In this case, the output of the application is a vector file or database. The input image is split into tiles (whose size can be set using the tilesize parameter), and each tile is loaded, segmented with the chosen algorithm, vectorized, and written into the output file or database. This piece-wise behavior ensures that memory will never get overloaded, and that images of any size can be processed. There are a few more options in the vector mode. The simplify option allows simplifying the geometry (i.e. removing nodes in polygons) according to a user-defined tolerance. The stitch option allows the application to try to stitch together polygons corresponding to segmented regions that may have been split by the tiling scheme.
Parameters
[param] -in <string> The input image to segment. Mandatory: True. Default Value: ""
[choice] -filter Choice of segmentation algorithm (mean-shift by default) meanshift,edison,cc,watershed,mprofiles. Mandatory: True. Default Value: "meanshift"
[group] -meanshift
[param] -filter.meanshift.spatialr <int32> Spatial radius of the neighborhood.. Mandatory: True. Default Value: "5"
[param] -filter.meanshift.ranger <float> Range radius defining the radius (expressed in radiometry unit) in the multispectral space.. Mandatory: True. Default Value: "15"
[param] -filter.meanshift.thres <float> Algorithm iterative scheme will stop if mean-shift vector is below this threshold or if iteration number reached maximum number of iterations.. Mandatory: True. Default Value: "0.1"
[param] -filter.meanshift.maxiter <int32> Algorithm iterative scheme will stop if convergence hasn't been reached after the maximum number of iterations.. Mandatory: True. Default Value: "100"
[param] -filter.meanshift.minsize <int32> Minimum size of a region (in pixel unit) in segmentation. Smaller clusters will be merged to the neighboring cluster with the closest radiometry. If set to 0 no pruning is done.. Mandatory: True. Default Value: "100"
[param] -filter.edison.ranger <float> Range radius defining the radius (expressed in radiometry unit) in the multi-spectral space.. Mandatory: True. Default Value: "15"
[param] -filter.edison.minsize <int32> Minimum size of a region in segmentation. Smaller clusters will be merged to the neighboring cluster with the closest radiometry.. Mandatory: True. Default Value: "100"
[param] -filter.edison.scale <float> Scaling of the image before processing. This is useful for images with narrow decimal ranges (like [0,1] for instance). . Mandatory: True. Default Value: "1"
[group] -cc
[param] -filter.cc.expr <string> User defined connection condition, written as a mathematical expression. Available variables are p(i)b(i), intensity_p(i) and distance (example of expression : distance < 10 ). Mandatory: True. Default Value: ""
[group] -watershed
[param] -filter.watershed.threshold <float> Depth threshold Units in percentage of the maximum depth in the image.. Mandatory: True. Default Value: "0.01"
[param] -filter.watershed.level <float> flood level for generating the merge tree from the initial segmentation (between 0 and 1). Mandatory: True. Default Value: "0.1"
[group] -mprofiles
[param] -filter.mprofiles.size <int32> Size of the profiles. Mandatory: True. Default Value: "5"
[param] -filter.mprofiles.start <int32> Initial radius of the structuring element (in pixels). Mandatory: True. Default Value: "1"
[param] -filter.mprofiles.step <int32> Radius step along the profile (in pixels). Mandatory: True. Default Value: "1"
[param] -filter.mprofiles.sigma <float> Profiles values under the threshold will be ignored.. Mandatory: True. Default Value: "1"
[choice] -mode Choice of processing mode, either raster or large-scale. vector,raster. Mandatory: True. Default Value: "vector"
[group] -vector
[param] -mode.vector.out <string> The output vector file or database (name can be anything understood by OGR). Mandatory: True. Default Value: ""
[param] -mode.vector.outmode <string> This allows to set the writing behaviour for the output vector file. Please note that the actual behaviour depends on the file format.. Mandatory: True. Default Value: "ulco"
[param] -mode.vector.inmask <string> Only pixels whose mask value is strictly positive will be segmented.. Mandatory: False. Default Value: ""
[param] -mode.vector.stitch <boolean> Scan polygons on each side of tiles and stitch polygons which connect by more than one pixel.. Mandatory: False. Default Value: "True"
[param] -mode.vector.minsize <int32> Objects whose size is below the minimum object size (area in pixels) will be ignored during vectorization.. Mandatory: False. Default Value: "1"
[param] -mode.vector.simplify <float> Simplify polygons according to a given tolerance (in pixel). This option allows to reduce the size of the output file or database.. Mandatory: False. Default Value: "0.1"
[param] -mode.vector.layername <string> Name of the layer in the vector file or database (default is Layer).. Mandatory: True. Default Value: "layer"
[param] -mode.vector.fieldname <string> Name of the field holding the geometry index in the output vector file or database.. Mandatory: True. Default Value: "DN"
[param] -mode.vector.tilesize <int32> User defined tiles size for tile-based segmentation. Optimal tile size is selected according to available RAM if null.. Mandatory: True. Default Value: "1024"
[param] -mode.vector.startlabel <int32> Starting value of the geometry index field. Mandatory: True. Default Value: "1"
[param] -mode.vector.ogroptions <string> A list of layer creation options in the form KEY=VALUE that will be passed directly to OGR without any validity checking. Options may depend on the file format, and can be found in OGR documentation.. Mandatory: False. Default Value: ""
In raster mode, the application cannot handle large input images. The stitching step of vector mode might become slow with very large input images. MeanShift filter results depend on the number of threads.
Authors
OTB-Team
See Also
MeanShiftSegmentation
Example of use
Example of use with vector mode and watershed segmentation
in: QB_Toulouse_Ortho_PAN.tif
mode: vector
mode.vector.out: SegmentationVector.sqlite
filter: watershed
Example of use with raster mode and mean-shift segmentation
in: QB_Toulouse_Ortho_PAN.tif
mode: raster
mode.raster.out: SegmentationRaster.tif uint16
filter: meanshift
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/Segmentation-edison.html b/python/plugins/processing/otb/description/doc/Segmentation-edison.html
new file mode 100644
index 000000000000..7fb053cc2c7c
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/Segmentation-edison.html
@@ -0,0 +1,9 @@
+
+
+
Segmentation
Brief Description
Performs segmentation of an image, and outputs either a raster or a vector file. In vector mode, large input datasets are supported.
Tags
Segmentation
Long Description
This application allows performing various segmentation algorithms on a multispectral image. Available segmentation algorithms are two different versions of the Mean-Shift segmentation algorithm (one being multi-threaded), simple pixel-based connected components according to a user-defined criterion, and watershed from the gradient of the intensity (norm of the spectral bands vector). The application has two different modes that affect the nature of its output.
+
+In raster mode, the output of the application is a classical image of unique labels identifying the segmented regions. The labeled output can be passed to the ColorMapping application to render regions with contrasted colours. Please note that this mode loads the whole input image into memory, and as such can not handle large images.
+
+To segment large data, one can use the vector mode. In this case, the output of the application is a vector file or database. The input image is split into tiles (whose size can be set using the tilesize parameter), and each tile is loaded, segmented with the chosen algorithm, vectorized, and written into the output file or database. This piece-wise behavior ensures that memory will never get overloaded, and that images of any size can be processed. There are a few more options in the vector mode. The simplify option allows simplifying the geometry (i.e. removing nodes in polygons) according to a user-defined tolerance. The stitch option allows the application to try to stitch together polygons corresponding to segmented regions that may have been split by the tiling scheme.
Parameters
[param] -in <string> The input image to segment. Mandatory: True. Default Value: ""
[choice] -filter Choice of segmentation algorithm (mean-shift by default) meanshift,edison,cc,watershed,mprofiles. Mandatory: True. Default Value: "meanshift"
[group] -meanshift
[param] -filter.meanshift.spatialr <int32> Spatial radius of the neighborhood.. Mandatory: True. Default Value: "5"
[param] -filter.meanshift.ranger <float> Range radius defining the radius (expressed in radiometry unit) in the multispectral space.. Mandatory: True. Default Value: "15"
[param] -filter.meanshift.thres <float> Algorithm iterative scheme will stop if mean-shift vector is below this threshold or if iteration number reached maximum number of iterations.. Mandatory: True. Default Value: "0.1"
[param] -filter.meanshift.maxiter <int32> Algorithm iterative scheme will stop if convergence hasn't been reached after the maximum number of iterations.. Mandatory: True. Default Value: "100"
[param] -filter.meanshift.minsize <int32> Minimum size of a region (in pixel unit) in segmentation. Smaller clusters will be merged to the neighboring cluster with the closest radiometry. If set to 0 no pruning is done.. Mandatory: True. Default Value: "100"
[param] -filter.edison.ranger <float> Range radius defining the radius (expressed in radiometry unit) in the multi-spectral space.. Mandatory: True. Default Value: "15"
[param] -filter.edison.minsize <int32> Minimum size of a region in segmentation. Smaller clusters will be merged to the neighboring cluster with the closest radiometry.. Mandatory: True. Default Value: "100"
[param] -filter.edison.scale <float> Scaling of the image before processing. This is useful for images with narrow decimal ranges (like [0,1] for instance). . Mandatory: True. Default Value: "1"
[group] -cc
[param] -filter.cc.expr <string> User defined connection condition, written as a mathematical expression. Available variables are p(i)b(i), intensity_p(i) and distance (example of expression : distance < 10 ). Mandatory: True. Default Value: ""
[group] -watershed
[param] -filter.watershed.threshold <float> Depth threshold Units in percentage of the maximum depth in the image.. Mandatory: True. Default Value: "0.01"
[param] -filter.watershed.level <float> flood level for generating the merge tree from the initial segmentation (between 0 and 1). Mandatory: True. Default Value: "0.1"
[group] -mprofiles
[param] -filter.mprofiles.size <int32> Size of the profiles. Mandatory: True. Default Value: "5"
[param] -filter.mprofiles.start <int32> Initial radius of the structuring element (in pixels). Mandatory: True. Default Value: "1"
[param] -filter.mprofiles.step <int32> Radius step along the profile (in pixels). Mandatory: True. Default Value: "1"
[param] -filter.mprofiles.sigma <float> Profiles values under the threshold will be ignored.. Mandatory: True. Default Value: "1"
[choice] -mode Choice of processing mode, either raster or large-scale. vector,raster. Mandatory: True. Default Value: "vector"
[group] -vector
[param] -mode.vector.out <string> The output vector file or database (name can be anything understood by OGR). Mandatory: True. Default Value: ""
[param] -mode.vector.outmode <string> This allows to set the writing behaviour for the output vector file. Please note that the actual behaviour depends on the file format.. Mandatory: True. Default Value: "ulco"
[param] -mode.vector.inmask <string> Only pixels whose mask value is strictly positive will be segmented.. Mandatory: False. Default Value: ""
[param] -mode.vector.stitch <boolean> Scan polygons on each side of tiles and stitch polygons which connect by more than one pixel.. Mandatory: False. Default Value: "True"
[param] -mode.vector.minsize <int32> Objects whose size is below the minimum object size (area in pixels) will be ignored during vectorization.. Mandatory: False. Default Value: "1"
[param] -mode.vector.simplify <float> Simplify polygons according to a given tolerance (in pixel). This option allows to reduce the size of the output file or database.. Mandatory: False. Default Value: "0.1"
[param] -mode.vector.layername <string> Name of the layer in the vector file or database (default is Layer).. Mandatory: True. Default Value: "layer"
[param] -mode.vector.fieldname <string> Name of the field holding the geometry index in the output vector file or database.. Mandatory: True. Default Value: "DN"
[param] -mode.vector.tilesize <int32> User defined tiles size for tile-based segmentation. Optimal tile size is selected according to available RAM if null.. Mandatory: True. Default Value: "1024"
[param] -mode.vector.startlabel <int32> Starting value of the geometry index field. Mandatory: True. Default Value: "1"
[param] -mode.vector.ogroptions <string> A list of layer creation options in the form KEY=VALUE that will be passed directly to OGR without any validity checking. Options may depend on the file format, and can be found in OGR documentation.. Mandatory: False. Default Value: ""
In raster mode, the application cannot handle large input images. The stitching step of vector mode might become slow with very large input images. MeanShift filter results depend on the number of threads.
Authors
OTB-Team
See Also
MeanShiftSegmentation
Example of use
Example of use with vector mode and watershed segmentation
in: QB_Toulouse_Ortho_PAN.tif
mode: vector
mode.vector.out: SegmentationVector.sqlite
filter: watershed
Example of use with raster mode and mean-shift segmentation
in: QB_Toulouse_Ortho_PAN.tif
mode: raster
mode.raster.out: SegmentationRaster.tif uint16
filter: meanshift
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/Segmentation-meanshift.html b/python/plugins/processing/otb/description/doc/Segmentation-meanshift.html
new file mode 100644
index 000000000000..7fb053cc2c7c
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/Segmentation-meanshift.html
@@ -0,0 +1,9 @@
+
+
+
Segmentation
Brief Description
Performs segmentation of an image, and outputs either a raster or a vector file. In vector mode, large input datasets are supported.
Tags
Segmentation
Long Description
This application allows performing various segmentation algorithms on a multispectral image. Available segmentation algorithms are two different versions of the Mean-Shift segmentation algorithm (one being multi-threaded), simple pixel-based connected components according to a user-defined criterion, and watershed from the gradient of the intensity (norm of the spectral bands vector). The application has two different modes that affect the nature of its output.
+
+In raster mode, the output of the application is a classical image of unique labels identifying the segmented regions. The labeled output can be passed to the ColorMapping application to render regions with contrasted colours. Please note that this mode loads the whole input image into memory, and as such can not handle large images.
+
+To segment large data, one can use the vector mode. In this case, the output of the application is a vector file or database. The input image is split into tiles (whose size can be set using the tilesize parameter), and each tile is loaded, segmented with the chosen algorithm, vectorized, and written into the output file or database. This piece-wise behavior ensures that memory will never get overloaded, and that images of any size can be processed. There are a few more options in the vector mode. The simplify option allows simplifying the geometry (i.e. removing nodes in polygons) according to a user-defined tolerance. The stitch option allows the application to try to stitch together polygons corresponding to segmented regions that may have been split by the tiling scheme.
Parameters
[param] -in <string> The input image to segment. Mandatory: True. Default Value: ""
[choice] -filter Choice of segmentation algorithm (mean-shift by default) meanshift,edison,cc,watershed,mprofiles. Mandatory: True. Default Value: "meanshift"
[group] -meanshift
[param] -filter.meanshift.spatialr <int32> Spatial radius of the neighborhood.. Mandatory: True. Default Value: "5"
[param] -filter.meanshift.ranger <float> Range radius defining the radius (expressed in radiometry unit) in the multispectral space.. Mandatory: True. Default Value: "15"
[param] -filter.meanshift.thres <float> Algorithm iterative scheme will stop if mean-shift vector is below this threshold or if iteration number reached maximum number of iterations.. Mandatory: True. Default Value: "0.1"
[param] -filter.meanshift.maxiter <int32> Algorithm iterative scheme will stop if convergence hasn't been reached after the maximum number of iterations.. Mandatory: True. Default Value: "100"
[param] -filter.meanshift.minsize <int32> Minimum size of a region (in pixel unit) in segmentation. Smaller clusters will be merged to the neighboring cluster with the closest radiometry. If set to 0 no pruning is done.. Mandatory: True. Default Value: "100"
[param] -filter.edison.ranger <float> Range radius defining the radius (expressed in radiometry unit) in the multi-spectral space.. Mandatory: True. Default Value: "15"
[param] -filter.edison.minsize <int32> Minimum size of a region in segmentation. Smaller clusters will be merged to the neighboring cluster with the closest radiometry.. Mandatory: True. Default Value: "100"
[param] -filter.edison.scale <float> Scaling of the image before processing. This is useful for images with narrow decimal ranges (like [0,1] for instance). . Mandatory: True. Default Value: "1"
[group] -cc
[param] -filter.cc.expr <string> User defined connection condition, written as a mathematical expression. Available variables are p(i)b(i), intensity_p(i) and distance (example of expression : distance < 10 ). Mandatory: True. Default Value: ""
[group] -watershed
[param] -filter.watershed.threshold <float> Depth threshold Units in percentage of the maximum depth in the image.. Mandatory: True. Default Value: "0.01"
[param] -filter.watershed.level <float> flood level for generating the merge tree from the initial segmentation (between 0 and 1). Mandatory: True. Default Value: "0.1"
[group] -mprofiles
[param] -filter.mprofiles.size <int32> Size of the profiles. Mandatory: True. Default Value: "5"
[param] -filter.mprofiles.start <int32> Initial radius of the structuring element (in pixels). Mandatory: True. Default Value: "1"
[param] -filter.mprofiles.step <int32> Radius step along the profile (in pixels). Mandatory: True. Default Value: "1"
[param] -filter.mprofiles.sigma <float> Profiles values under the threshold will be ignored.. Mandatory: True. Default Value: "1"
[choice] -mode Choice of processing mode, either raster or large-scale. vector,raster. Mandatory: True. Default Value: "vector"
[group] -vector
[param] -mode.vector.out <string> The output vector file or database (name can be anything understood by OGR). Mandatory: True. Default Value: ""
[param] -mode.vector.outmode <string> This allows to set the writing behaviour for the output vector file. Please note that the actual behaviour depends on the file format.. Mandatory: True. Default Value: "ulco"
[param] -mode.vector.inmask <string> Only pixels whose mask value is strictly positive will be segmented.. Mandatory: False. Default Value: ""
[param] -mode.vector.stitch <boolean> Scan polygons on each side of tiles and stitch polygons which connect by more than one pixel.. Mandatory: False. Default Value: "True"
[param] -mode.vector.minsize <int32> Objects whose size is below the minimum object size (area in pixels) will be ignored during vectorization.. Mandatory: False. Default Value: "1"
[param] -mode.vector.simplify <float> Simplify polygons according to a given tolerance (in pixel). This option allows to reduce the size of the output file or database.. Mandatory: False. Default Value: "0.1"
[param] -mode.vector.layername <string> Name of the layer in the vector file or database (default is Layer).. Mandatory: True. Default Value: "layer"
[param] -mode.vector.fieldname <string> Name of the field holding the geometry index in the output vector file or database.. Mandatory: True. Default Value: "DN"
[param] -mode.vector.tilesize <int32> User defined tiles size for tile-based segmentation. Optimal tile size is selected according to available RAM if null.. Mandatory: True. Default Value: "1024"
[param] -mode.vector.startlabel <int32> Starting value of the geometry index field. Mandatory: True. Default Value: "1"
[param] -mode.vector.ogroptions <string> A list of layer creation options in the form KEY=VALUE that will be passed directly to OGR without any validity checking. Options may depend on the file format, and can be found in OGR documentation.. Mandatory: False. Default Value: ""
In raster mode, the application cannot handle large input images. The stitching step of vector mode might become slow with very large input images. MeanShift filter results depend on the number of threads.
Authors
OTB-Team
See Also
MeanShiftSegmentation
Example of use
Example of use with vector mode and watershed segmentation
in: QB_Toulouse_Ortho_PAN.tif
mode: vector
mode.vector.out: SegmentationVector.sqlite
filter: watershed
Example of use with raster mode and mean-shift segmentation
in: QB_Toulouse_Ortho_PAN.tif
mode: raster
mode.raster.out: SegmentationRaster.tif uint16
filter: meanshift
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/Segmentation-mprofiles.html b/python/plugins/processing/otb/description/doc/Segmentation-mprofiles.html
new file mode 100644
index 000000000000..7fb053cc2c7c
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/Segmentation-mprofiles.html
@@ -0,0 +1,9 @@
+
+
+
Segmentation
Brief Description
Performs segmentation of an image, and outputs either a raster or a vector file. In vector mode, large input datasets are supported.
Tags
Segmentation
Long Description
This application allows performing various segmentation algorithms on a multispectral image. Available segmentation algorithms are two different versions of the Mean-Shift segmentation algorithm (one being multi-threaded), simple pixel-based connected components according to a user-defined criterion, and watershed from the gradient of the intensity (norm of the spectral bands vector). The application has two different modes that affect the nature of its output.
+
+In raster mode, the output of the application is a classical image of unique labels identifying the segmented regions. The labeled output can be passed to the ColorMapping application to render regions with contrasted colours. Please note that this mode loads the whole input image into memory, and as such can not handle large images.
+
+To segment large data, one can use the vector mode. In this case, the output of the application is a vector file or database. The input image is split into tiles (whose size can be set using the tilesize parameter), and each tile is loaded, segmented with the chosen algorithm, vectorized, and written into the output file or database. This piece-wise behavior ensures that memory will never get overloaded, and that images of any size can be processed. There are a few more options in the vector mode. The simplify option allows simplifying the geometry (i.e. removing nodes in polygons) according to a user-defined tolerance. The stitch option allows the application to try to stitch together polygons corresponding to segmented regions that may have been split by the tiling scheme.
Parameters
[param] -in <string> The input image to segment. Mandatory: True. Default Value: ""
[choice] -filter Choice of segmentation algorithm (mean-shift by default) meanshift,edison,cc,watershed,mprofiles. Mandatory: True. Default Value: "meanshift"
[group] -meanshift
[param] -filter.meanshift.spatialr <int32> Spatial radius of the neighborhood.. Mandatory: True. Default Value: "5"
[param] -filter.meanshift.ranger <float> Range radius defining the radius (expressed in radiometry unit) in the multispectral space.. Mandatory: True. Default Value: "15"
[param] -filter.meanshift.thres <float> Algorithm iterative scheme will stop if mean-shift vector is below this threshold or if iteration number reached maximum number of iterations.. Mandatory: True. Default Value: "0.1"
[param] -filter.meanshift.maxiter <int32> Algorithm iterative scheme will stop if convergence hasn't been reached after the maximum number of iterations.. Mandatory: True. Default Value: "100"
[param] -filter.meanshift.minsize <int32> Minimum size of a region (in pixel unit) in segmentation. Smaller clusters will be merged to the neighboring cluster with the closest radiometry. If set to 0 no pruning is done.. Mandatory: True. Default Value: "100"
[param] -filter.edison.ranger <float> Range radius defining the radius (expressed in radiometry unit) in the multi-spectral space.. Mandatory: True. Default Value: "15"
[param] -filter.edison.minsize <int32> Minimum size of a region in segmentation. Smaller clusters will be merged to the neighboring cluster with the closest radiometry.. Mandatory: True. Default Value: "100"
[param] -filter.edison.scale <float> Scaling of the image before processing. This is useful for images with narrow decimal ranges (like [0,1] for instance). . Mandatory: True. Default Value: "1"
[group] -cc
[param] -filter.cc.expr <string> User defined connection condition, written as a mathematical expression. Available variables are p(i)b(i), intensity_p(i) and distance (example of expression : distance < 10 ). Mandatory: True. Default Value: ""
[group] -watershed
[param] -filter.watershed.threshold <float> Depth threshold Units in percentage of the maximum depth in the image.. Mandatory: True. Default Value: "0.01"
[param] -filter.watershed.level <float> flood level for generating the merge tree from the initial segmentation (between 0 and 1). Mandatory: True. Default Value: "0.1"
[group] -mprofiles
[param] -filter.mprofiles.size <int32> Size of the profiles. Mandatory: True. Default Value: "5"
[param] -filter.mprofiles.start <int32> Initial radius of the structuring element (in pixels). Mandatory: True. Default Value: "1"
[param] -filter.mprofiles.step <int32> Radius step along the profile (in pixels). Mandatory: True. Default Value: "1"
[param] -filter.mprofiles.sigma <float> Profiles values under the threshold will be ignored.. Mandatory: True. Default Value: "1"
[choice] -mode Choice of processing mode, either raster or large-scale. vector,raster. Mandatory: True. Default Value: "vector"
[group] -vector
[param] -mode.vector.out <string> The output vector file or database (name can be anything understood by OGR). Mandatory: True. Default Value: ""
[param] -mode.vector.outmode <string> This allows to set the writing behaviour for the output vector file. Please note that the actual behaviour depends on the file format.. Mandatory: True. Default Value: "ulco"
[param] -mode.vector.inmask <string> Only pixels whose mask value is strictly positive will be segmented.. Mandatory: False. Default Value: ""
[param] -mode.vector.stitch <boolean> Scan polygons on each side of tiles and stitch polygons which connect by more than one pixel.. Mandatory: False. Default Value: "True"
[param] -mode.vector.minsize <int32> Objects whose size is below the minimum object size (area in pixels) will be ignored during vectorization.. Mandatory: False. Default Value: "1"
[param] -mode.vector.simplify <float> Simplify polygons according to a given tolerance (in pixel). This option allows to reduce the size of the output file or database.. Mandatory: False. Default Value: "0.1"
[param] -mode.vector.layername <string> Name of the layer in the vector file or database (default is Layer).. Mandatory: True. Default Value: "layer"
[param] -mode.vector.fieldname <string> Name of the field holding the geometry index in the output vector file or database.. Mandatory: True. Default Value: "DN"
[param] -mode.vector.tilesize <int32> User defined tiles size for tile-based segmentation. Optimal tile size is selected according to available RAM if null.. Mandatory: True. Default Value: "1024"
[param] -mode.vector.startlabel <int32> Starting value of the geometry index field. Mandatory: True. Default Value: "1"
[param] -mode.vector.ogroptions <string> A list of layer creation options in the form KEY=VALUE that will be passed directly to OGR without any validity checking. Options may depend on the file format, and can be found in OGR documentation.. Mandatory: False. Default Value: ""
In raster mode, the application cannot handle large input images. The stitching step of vector mode might become slow with very large input images. MeanShift filter results depend on the number of threads.
Authors
OTB-Team
See Also
MeanShiftSegmentation
Example of use
Example of use with vector mode and watershed segmentation
in: QB_Toulouse_Ortho_PAN.tif
mode: vector
mode.vector.out: SegmentationVector.sqlite
filter: watershed
Example of use with raster mode and mean-shift segmentation
in: QB_Toulouse_Ortho_PAN.tif
mode: raster
mode.raster.out: SegmentationRaster.tif uint16
filter: meanshift
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/Segmentation-watershed.html b/python/plugins/processing/otb/description/doc/Segmentation-watershed.html
new file mode 100644
index 000000000000..7fb053cc2c7c
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/Segmentation-watershed.html
@@ -0,0 +1,9 @@
+
+
+
Segmentation
Brief Description
Performs segmentation of an image, and output either a raster or a vector file. In vector mode, large input datasets are supported.
Tags
Segmentation
Long Description
This application allows performing various segmentation algorithms on a multispectral image. Available segmentation algorithms are two different versions of Mean-Shift segmentation algorithm (one being multi-threaded), simple pixel based connected components according to a user-defined criterion, and watershed from the gradient of the intensity (norm of spectral bands vector). The application has two different modes that affect the nature of its output.
+
+In raster mode, the output of the application is a classical image of unique labels identifying the segmented regions. The labeled output can be passed to the ColorMapping application to render regions with contrasted colours. Please note that this mode loads the whole input image into memory, and as such can not handle large images.
+
+To segment large data, one can use the vector mode. In this case, the output of the application is a vector file or database. The input image is split into tiles (whose size can be set using the tilesize parameter), and each tile is loaded, segmented with the chosen algorithm, vectorized, and written into the output file or database. This piece-wise behavior ensures that memory will never get overloaded, and that images of any size can be processed. There are a few more options in the vector mode. The simplify option allows to simplify the geometry (i.e. remove nodes in polygons) according to a user-defined tolerance. The stitch option allows the application to try to stitch together polygons corresponding to segmented regions that may have been split by the tiling scheme.
Parameters
[param] -in <string> The input image to segment. Mandatory: True. Default Value: ""
[choice] -filter Choice of segmentation algorithm (mean-shift by default) meanshift,edison,cc,watershed,mprofiles. Mandatory: True. Default Value: "meanshift"
[group] -meanshift
[param] -filter.meanshift.spatialr <int32> Spatial radius of the neighborhood.. Mandatory: True. Default Value: "5"
[param] -filter.meanshift.ranger <float> Range radius defining the radius (expressed in radiometry unit) in the multispectral space.. Mandatory: True. Default Value: "15"
[param] -filter.meanshift.thres <float> Algorithm iterative scheme will stop if mean-shift vector is below this threshold or if iteration number reached maximum number of iterations.. Mandatory: True. Default Value: "0.1"
[param] -filter.meanshift.maxiter <int32> Algorithm iterative scheme will stop if convergence hasn't been reached after the maximum number of iterations.. Mandatory: True. Default Value: "100"
[param] -filter.meanshift.minsize <int32> Minimum size of a region (in pixel unit) in segmentation. Smaller clusters will be merged to the neighboring cluster with the closest radiometry. If set to 0 no pruning is done.. Mandatory: True. Default Value: "100"
[param] -filter.edison.ranger <float> Range radius defining the radius (expressed in radiometry unit) in the multi-spectral space.. Mandatory: True. Default Value: "15"
[param] -filter.edison.minsize <int32> Minimum size of a region in segmentation. Smaller clusters will be merged to the neighboring cluster with the closest radiometry.. Mandatory: True. Default Value: "100"
[param] -filter.edison.scale <float> Scaling of the image before processing. This is useful for images with narrow decimal ranges (like [0,1] for instance). . Mandatory: True. Default Value: "1"
[group] -cc
[param] -filter.cc.expr <string> User defined connection condition, written as a mathematical expression. Available variables are p(i)b(i), intensity_p(i) and distance (example of expression : distance < 10 ). Mandatory: True. Default Value: ""
[group] -watershed
[param] -filter.watershed.threshold <float> Depth threshold Units in percentage of the maximum depth in the image.. Mandatory: True. Default Value: "0.01"
[param] -filter.watershed.level <float> flood level for generating the merge tree from the initial segmentation (between 0 and 1). Mandatory: True. Default Value: "0.1"
[group] -mprofiles
[param] -filter.mprofiles.size <int32> Size of the profiles. Mandatory: True. Default Value: "5"
[param] -filter.mprofiles.start <int32> Initial radius of the structuring element (in pixels). Mandatory: True. Default Value: "1"
[param] -filter.mprofiles.step <int32> Radius step along the profile (in pixels). Mandatory: True. Default Value: "1"
[param] -filter.mprofiles.sigma <float> Profiles values under the threshold will be ignored.. Mandatory: True. Default Value: "1"
[choice] -mode Choice of processing mode, either raster or large-scale. vector,raster. Mandatory: True. Default Value: "vector"
[group] -vector
[param] -mode.vector.out <string> The output vector file or database (name can be anything understood by OGR). Mandatory: True. Default Value: ""
[param] -mode.vector.outmode <string> This allows to set the writing behaviour for the output vector file. Please note that the actual behaviour depends on the file format.. Mandatory: True. Default Value: "ulco"
[param] -mode.vector.inmask <string> Only pixels whose mask value is strictly positive will be segmented.. Mandatory: False. Default Value: ""
[param] -mode.vector.stitch <boolean> Scan polygons on each side of tiles and stitch polygons which connect by more than one pixel.. Mandatory: False. Default Value: "True"
[param] -mode.vector.minsize <int32> Objects whose size is below the minimum object size (area in pixels) will be ignored during vectorization.. Mandatory: False. Default Value: "1"
[param] -mode.vector.simplify <float> Simplify polygons according to a given tolerance (in pixel). This option allows to reduce the size of the output file or database.. Mandatory: False. Default Value: "0.1"
[param] -mode.vector.layername <string> Name of the layer in the vector file or database (default is Layer).. Mandatory: True. Default Value: "layer"
[param] -mode.vector.fieldname <string> Name of the field holding the geometry index in the output vector file or database.. Mandatory: True. Default Value: "DN"
[param] -mode.vector.tilesize <int32> User defined tiles size for tile-based segmentation. Optimal tile size is selected according to available RAM if null.. Mandatory: True. Default Value: "1024"
[param] -mode.vector.startlabel <int32> Starting value of the geometry index field. Mandatory: True. Default Value: "1"
[param] -mode.vector.ogroptions <string> A list of layer creation options in the form KEY=VALUE that will be passed directly to OGR without any validity checking. Options may depend on the file format, and can be found in OGR documentation.. Mandatory: False. Default Value: ""
In raster mode, the application cannot handle large input images. The stitching step of vector mode might become slow with very large input images. MeanShift filter results depend on the number of threads.
Authors
OTB-Team
See Also
MeanShiftSegmentation
Example of use
Example of use with vector mode and watershed segmentation
in: QB_Toulouse_Ortho_PAN.tif
mode: vector
mode.vector.out: SegmentationVector.sqlite
filter: watershed
Example of use with raster mode and mean-shift segmentation
in: QB_Toulouse_Ortho_PAN.tif
mode: raster
mode.raster.out: SegmentationRaster.tif uint16
filter: meanshift
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/Segmentation.html b/python/plugins/processing/otb/description/doc/Segmentation.html
new file mode 100644
index 000000000000..7fb053cc2c7c
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/Segmentation.html
@@ -0,0 +1,9 @@
+
+
+
Segmentation
Brief Description
Performs segmentation of an image, and output either a raster or a vector file. In vector mode, large input datasets are supported.
Tags
Segmentation
Long Description
This application allows performing various segmentation algorithms on a multispectral image. Available segmentation algorithms are two different versions of Mean-Shift segmentation algorithm (one being multi-threaded), simple pixel based connected components according to a user-defined criterion, and watershed from the gradient of the intensity (norm of spectral bands vector). The application has two different modes that affect the nature of its output.
+
+In raster mode, the output of the application is a classical image of unique labels identifying the segmented regions. The labeled output can be passed to the ColorMapping application to render regions with contrasted colours. Please note that this mode loads the whole input image into memory, and as such can not handle large images.
+
+To segment large data, one can use the vector mode. In this case, the output of the application is a vector file or database. The input image is split into tiles (whose size can be set using the tilesize parameter), and each tile is loaded, segmented with the chosen algorithm, vectorized, and written into the output file or database. This piece-wise behavior ensures that memory will never get overloaded, and that images of any size can be processed. There are a few more options in the vector mode. The simplify option allows to simplify the geometry (i.e. remove nodes in polygons) according to a user-defined tolerance. The stitch option allows the application to try to stitch together polygons corresponding to segmented regions that may have been split by the tiling scheme.
Parameters
[param] -in <string> The input image to segment. Mandatory: True. Default Value: ""
[choice] -filter Choice of segmentation algorithm (mean-shift by default) meanshift,edison,cc,watershed,mprofiles. Mandatory: True. Default Value: "meanshift"
[group] -meanshift
[param] -filter.meanshift.spatialr <int32> Spatial radius of the neighborhood.. Mandatory: True. Default Value: "5"
[param] -filter.meanshift.ranger <float> Range radius defining the radius (expressed in radiometry unit) in the multispectral space.. Mandatory: True. Default Value: "15"
[param] -filter.meanshift.thres <float> Algorithm iterative scheme will stop if mean-shift vector is below this threshold or if iteration number reached maximum number of iterations.. Mandatory: True. Default Value: "0.1"
[param] -filter.meanshift.maxiter <int32> Algorithm iterative scheme will stop if convergence hasn't been reached after the maximum number of iterations.. Mandatory: True. Default Value: "100"
[param] -filter.meanshift.minsize <int32> Minimum size of a region (in pixel unit) in segmentation. Smaller clusters will be merged to the neighboring cluster with the closest radiometry. If set to 0 no pruning is done.. Mandatory: True. Default Value: "100"
[param] -filter.edison.ranger <float> Range radius defining the radius (expressed in radiometry unit) in the multi-spectral space.. Mandatory: True. Default Value: "15"
[param] -filter.edison.minsize <int32> Minimum size of a region in segmentation. Smaller clusters will be merged to the neighboring cluster with the closest radiometry.. Mandatory: True. Default Value: "100"
[param] -filter.edison.scale <float> Scaling of the image before processing. This is useful for images with narrow decimal ranges (like [0,1] for instance). . Mandatory: True. Default Value: "1"
[group] -cc
[param] -filter.cc.expr <string> User defined connection condition, written as a mathematical expression. Available variables are p(i)b(i), intensity_p(i) and distance (example of expression : distance < 10 ). Mandatory: True. Default Value: ""
[group] -watershed
[param] -filter.watershed.threshold <float> Depth threshold Units in percentage of the maximum depth in the image.. Mandatory: True. Default Value: "0.01"
[param] -filter.watershed.level <float> flood level for generating the merge tree from the initial segmentation (between 0 and 1). Mandatory: True. Default Value: "0.1"
[group] -mprofiles
[param] -filter.mprofiles.size <int32> Size of the profiles. Mandatory: True. Default Value: "5"
[param] -filter.mprofiles.start <int32> Initial radius of the structuring element (in pixels). Mandatory: True. Default Value: "1"
[param] -filter.mprofiles.step <int32> Radius step along the profile (in pixels). Mandatory: True. Default Value: "1"
[param] -filter.mprofiles.sigma <float> Profiles values under the threshold will be ignored.. Mandatory: True. Default Value: "1"
[choice] -mode Choice of processing mode, either raster or large-scale. vector,raster. Mandatory: True. Default Value: "vector"
[group] -vector
[param] -mode.vector.out <string> The output vector file or database (name can be anything understood by OGR). Mandatory: True. Default Value: ""
[param] -mode.vector.outmode <string> This allows to set the writing behaviour for the output vector file. Please note that the actual behaviour depends on the file format.. Mandatory: True. Default Value: "ulco"
[param] -mode.vector.inmask <string> Only pixels whose mask value is strictly positive will be segmented.. Mandatory: False. Default Value: ""
[param] -mode.vector.stitch <boolean> Scan polygons on each side of tiles and stitch polygons which connect by more than one pixel.. Mandatory: False. Default Value: "True"
[param] -mode.vector.minsize <int32> Objects whose size is below the minimum object size (area in pixels) will be ignored during vectorization.. Mandatory: False. Default Value: "1"
[param] -mode.vector.simplify <float> Simplify polygons according to a given tolerance (in pixel). This option allows to reduce the size of the output file or database.. Mandatory: False. Default Value: "0.1"
[param] -mode.vector.layername <string> Name of the layer in the vector file or database (default is Layer).. Mandatory: True. Default Value: "layer"
[param] -mode.vector.fieldname <string> Name of the field holding the geometry index in the output vector file or database.. Mandatory: True. Default Value: "DN"
[param] -mode.vector.tilesize <int32> User defined tiles size for tile-based segmentation. Optimal tile size is selected according to available RAM if null.. Mandatory: True. Default Value: "1024"
[param] -mode.vector.startlabel <int32> Starting value of the geometry index field. Mandatory: True. Default Value: "1"
[param] -mode.vector.ogroptions <string> A list of layer creation options in the form KEY=VALUE that will be passed directly to OGR without any validity checking. Options may depend on the file format, and can be found in OGR documentation.. Mandatory: False. Default Value: ""
In raster mode, the application cannot handle large input images. The stitching step of vector mode might become slow with very large input images. MeanShift filter results depend on the number of threads.
Authors
OTB-Team
See Also
MeanShiftSegmentation
Example of use
Example of use with vector mode and watershed segmentation
in: QB_Toulouse_Ortho_PAN.tif
mode: vector
mode.vector.out: SegmentationVector.sqlite
filter: watershed
Example of use with raster mode and mean-shift segmentation
in: QB_Toulouse_Ortho_PAN.tif
mode: raster
mode.raster.out: SegmentationRaster.tif uint16
filter: meanshift
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/Smoothing-anidif.html b/python/plugins/processing/otb/description/doc/Smoothing-anidif.html
new file mode 100644
index 000000000000..56a08faab8b7
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/Smoothing-anidif.html
@@ -0,0 +1,5 @@
+
+
+
Smoothing
Brief Description
Apply a smoothing filter to an image
Tags
Image Filtering
Long Description
This application applies smoothing filter to an image. Either gaussian, mean, or anisotropic diffusion are available.
[param] -type.anidif.nbiter <int32> Number of iterations. Mandatory: True. Default Value: "10"
Limitations
None
Authors
OTB-Team
See Also
Example of use
Image smoothing using a mean filter.
in: Romania_Extract.tif
out: smoothedImage_mean.png uchar
type: mean
Image smoothing using an anisotropic diffusion filter.
in: Romania_Extract.tif
out: smoothedImage_ani.png float
type: anidif
type.anidif.timestep: 0.1
type.anidif.nbiter: 5
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/Smoothing-gaussian.html b/python/plugins/processing/otb/description/doc/Smoothing-gaussian.html
new file mode 100644
index 000000000000..56a08faab8b7
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/Smoothing-gaussian.html
@@ -0,0 +1,5 @@
+
+
+
Smoothing
Brief Description
Apply a smoothing filter to an image
Tags
Image Filtering
Long Description
This application applies smoothing filter to an image. Either gaussian, mean, or anisotropic diffusion are available.
[param] -type.anidif.nbiter <int32> Number of iterations. Mandatory: True. Default Value: "10"
Limitations
None
Authors
OTB-Team
See Also
Example of use
Image smoothing using a mean filter.
in: Romania_Extract.tif
out: smoothedImage_mean.png uchar
type: mean
Image smoothing using an anisotropic diffusion filter.
in: Romania_Extract.tif
out: smoothedImage_ani.png float
type: anidif
type.anidif.timestep: 0.1
type.anidif.nbiter: 5
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/Smoothing-mean.html b/python/plugins/processing/otb/description/doc/Smoothing-mean.html
new file mode 100644
index 000000000000..56a08faab8b7
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/Smoothing-mean.html
@@ -0,0 +1,5 @@
+
+
+
Smoothing
Brief Description
Apply a smoothing filter to an image
Tags
Image Filtering
Long Description
This application applies smoothing filter to an image. Either gaussian, mean, or anisotropic diffusion are available.
[param] -type.anidif.nbiter <int32> Number of iterations. Mandatory: True. Default Value: "10"
Limitations
None
Authors
OTB-Team
See Also
Example of use
Image smoothing using a mean filter.
in: Romania_Extract.tif
out: smoothedImage_mean.png uchar
type: mean
Image smoothing using an anisotropic diffusion filter.
in: Romania_Extract.tif
out: smoothedImage_ani.png float
type: anidif
type.anidif.timestep: 0.1
type.anidif.nbiter: 5
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/Smoothing.html b/python/plugins/processing/otb/description/doc/Smoothing.html
index 633868ed6a3d..56a08faab8b7 100644
--- a/python/plugins/processing/otb/description/doc/Smoothing.html
+++ b/python/plugins/processing/otb/description/doc/Smoothing.html
@@ -1 +1,5 @@
-
Smoothing Application
Brief Description
Apply a smoothing filter to an image
Tags
Image Filtering
Long Description
This application applies smoothing filter to an image. Either gaussian, mean, or anisotropic diffusion are available.
Parameters
[param] Input Image (-in): Input image to filter.
[param] Output Image (-out): filtered image.
[param] Available RAM (-ram): Available RAM
[choice] Smoothing Type (-type): smoothing kernel to apply : mean, gaussian, anisotropic diffusion.
[group] Mean:
[param] Radius (-radius): Radius in pixels
[group] Gaussian:
[param] Radius (-radius): Radius in pixels
[group] Anisotropic Diffusion:
[param] Time Step (-timestep):
[param] Nb Iterations (-nbiter):
Limitations
None
Authors
OTB-Team
See also
Example of use
Parameters to set value:
Input Image: poupees.tif
Output Image: osmoothedImage.png uchar
Smoothing Type: mean
Command line to execute:
otbcli_Smoothing -in poupees.tif -out osmoothedImage.png uchar -type mean
\ No newline at end of file
+
+
+
Smoothing
Brief Description
Apply a smoothing filter to an image
Tags
Image Filtering
Long Description
This application applies smoothing filter to an image. Either gaussian, mean, or anisotropic diffusion are available.
[param] -type.anidif.nbiter <int32> Number of iterations. Mandatory: True. Default Value: "10"
Limitations
None
Authors
OTB-Team
See Also
Example of use
Image smoothing using a mean filter.
in: Romania_Extract.tif
out: smoothedImage_mean.png uchar
type: mean
Image smoothing using an anisotropic diffusion filter.
in: Romania_Extract.tif
out: smoothedImage_ani.png float
type: anidif
type.anidif.timestep: 0.1
type.anidif.nbiter: 5
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/SplitImage.html b/python/plugins/processing/otb/description/doc/SplitImage.html
index deedec9d2323..c5c0238f2d68 100644
--- a/python/plugins/processing/otb/description/doc/SplitImage.html
+++ b/python/plugins/processing/otb/description/doc/SplitImage.html
@@ -1 +1,5 @@
-
Split Image Application
Brief Description
Split a N multiband image into N images
Tags
Image Manipulation
Long Description
This application splits an N multiband image into N images. The output images filename will be generated from the output one. Thus if the input image has 2 channels, and the user has set an output outimage.tif, the generated images will be outimage_0.tif and outimage_1.tif
Parameters
[param] Input Image (-in): Input image filename.
[param] Output Image (-out): Will be used to get the prefix and the extension of the output images to write
This application splits a N-bands image into N mono-band images. The output images filename will be generated from the output parameter. Thus if the input image has 2 channels, and the user has set an output outimage.tif, the generated images will be outimage_0.tif and outimage_1.tif
[param] -out <string> Output filename that will be used to get the prefix and the extension of the output images to write. Mandatory: True. Default Value: ""
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
Limitations
None
Authors
OTB-Team
See Also
Example of use
in: VegetationIndex.hd
out: splittedImage.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/StereoFramework.html b/python/plugins/processing/otb/description/doc/StereoFramework.html
new file mode 100644
index 000000000000..d0c1b5118959
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/StereoFramework.html
@@ -0,0 +1,16 @@
+
+
+
StereoFramework
Brief Description
Compute the ground elevation based on one or multiple stereo pair(s)
Tags
Stereo
Long Description
Compute the ground elevation with a stereo block matching algorithm between one or multiple stereo pairs in sensor geometry. The output is projected in desired geographic or cartographic map projection (UTM by default). The pipeline is made of the following steps:
+for each sensor pair :
+ - compute the epipolar deformation grids from the stereo pair (direct and inverse)
+ - resample the stereo pair into epipolar geometry using BCO interpolation
+ - create masks for each epipolar image : remove black borders and resample input masks
+ - compute horizontal disparities with a block matching algorithm
+ - refine disparities to sub-pixel precision with a dichotomy algorithm
+ - apply an optional median filter
+ - filter disparities based on the correlation score and exploration bounds
+ - translate disparities in sensor geometry
+ convert disparity to 3D Map.
+Then fuse all 3D maps to produce DSM.
Parameters
[param] -input <string> This group of parameters allows to parametrize input data.. Mandatory: True. Default Value: "0"
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -output <string> This group of parameters allows to choose the DSM resolution, nodata value, and projection parameters.. Mandatory: True. Default Value: "0"
[param] -stereorect <string> This group of parameters allows to choose direct and inverse grid subsampling. These parameters are very useful to tune time and memory consumption.. Mandatory: True. Default Value: "0"
[param] -bm <string> This group of parameters allow to tune the block-matching behavior. Mandatory: True. Default Value: "0"
[param] -postproc <string> This group of parameters allow use optional filters.. Mandatory: True. Default Value: "0"
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -map Parameters of the output map projection to be used. utm,lambert2,lambert93,wgs,epsg. Mandatory: True. Default Value: "wgs"
[group] -utm
[param] -map.utm.zone <int32> The zone number ranges from 1 to 60 and allows to define the transverse mercator projection (along with the hemisphere). Mandatory: True. Default Value: "31"
[param] -map.utm.northhem <boolean> The transverse mercator projections are defined by their zone number as well as the hemisphere. Activate this parameter if your image is in the northern hemisphere.. Mandatory: False. Default Value: "True"
[group] -lambert2
[group] -lambert93
[group] -wgs
[group] -epsg
[param] -map.epsg.code <int32> See www.spatialreference.org to find which EPSG code is associated to your projection. Mandatory: True. Default Value: "4326"
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/StereoRectificationGridGenerator.html b/python/plugins/processing/otb/description/doc/StereoRectificationGridGenerator.html
new file mode 100644
index 000000000000..1df1bb8479f3
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/StereoRectificationGridGenerator.html
@@ -0,0 +1,5 @@
+
+
+
StereoRectificationGridGenerator
Brief Description
Generates two deformation fields to stereo-rectify (i.e. resample in epipolar geometry) a pair of stereo images up to the sensor model precision
Tags
Stereo
Long Description
This application generates a pair of deformation grid to stereo-rectify a pair of stereo images according to sensor modelling and a mean elevation hypothesis. The deformation grids can be passed to the StereoRectificationGridGenerator application for actual resampling in epipolar geometry.
Parameters
[param] -io <string> This group of parameters allows to set the input and output images.. Mandatory: True. Default Value: "0"
[param] -epi <string> Parameters of the epipolar geometry and output grids. Mandatory: True. Default Value: "0"
[param] -inverse <string> This group of parameter allows to generate the inverse fields as well. Mandatory: True. Default Value: "0"
Limitations
Generation of the deformation grid is not streamable, pay attention to this fact when setting the grid step.
Authors
OTB-Team
See Also
otbGridBasedImageResampling
Example of use
io.inleft: wv2_xs_left.tif
io.inright: wv2_xs_left.tif
io.outleft: wv2_xs_left_epi_field.tif
io.outright: wv2_xs_right_epi_field.tif
epi.elevation.default: 400
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/Superimpose.html b/python/plugins/processing/otb/description/doc/Superimpose.html
index e3b68526637e..0381f6569bdf 100644
--- a/python/plugins/processing/otb/description/doc/Superimpose.html
+++ b/python/plugins/processing/otb/description/doc/Superimpose.html
@@ -1 +1,5 @@
-
Superimpose sensor application
Brief Description
Using available image metadata, project one image onto another one
Tags
Geometry, Image Manipulation, Superimposition
Long Description
This application performs /....
Parameters
[param] Reference input (-inr):
[param] The image to reproject (-inm):
[param] DEM directory (-dem):
[param] Spacing of the deformation field (-lms): Generate a coarser deformation field with the given spacing
[param] -inm <string> The image to reproject into the geometry of the reference input.. Mandatory: True. Default Value: ""
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -lms <float> Generate a coarser deformation field with the given spacing. Mandatory: False. Default Value: "4"
[param] -ram <int32> Available memory for processing (in MB). Mandatory: False. Default Value: "128"
[choice] -interpolator This group of parameters allows to define how the input image will be interpolated during resampling. bco,nn,linear. Mandatory: True. Default Value: "bco"
[group] -bco
[param] -interpolator.bco.radius <int32> This parameter allows to control the size of the bicubic interpolation filter. If the target pixel size is higher than the input pixel size, increasing this parameter will reduce aliasing artefacts.. Mandatory: True. Default Value: "2"
[group] -nn
[group] -linear
Limitations
None
Authors
OTB-Team
See Also
Example of use
inr: QB_Toulouse_Ortho_PAN.tif
inm: QB_Toulouse_Ortho_XS.tif
out: SuperimposedXS_to_PAN.tif
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/TestApplication.html b/python/plugins/processing/otb/description/doc/TestApplication.html
new file mode 100644
index 000000000000..c74750943211
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/TestApplication.html
@@ -0,0 +1,5 @@
+
+
+
TestApplication
Brief Description
This application helps developers to test parameters types
Tags
Test
Long Description
The purpose of this application is to test parameters types.
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/TileFusion.html b/python/plugins/processing/otb/description/doc/TileFusion.html
new file mode 100644
index 000000000000..142dc73da58a
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/TileFusion.html
@@ -0,0 +1,5 @@
+
+
+
TileFusion
Brief Description
Fusion of an image made of several tile files.
Tags
Image Manipulation
Long Description
Concatenate several tile files into a single image file.
Parameters
[param] -il <string> Input tiles to concatenate (in lexicographic order : (0,0) (1,0) (0,1) (1,1)).. Mandatory: True. Default Value: "0"
[param] -cols <int32> Number of columns in the tile array. Mandatory: True. Default Value: "0"
[param] -rows <int32> Number of rows in the tile array. Mandatory: True. Default Value: "0"
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/TrainImagesClassifier-ann.html b/python/plugins/processing/otb/description/doc/TrainImagesClassifier-ann.html
new file mode 100644
index 000000000000..f50791258f5d
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/TrainImagesClassifier-ann.html
@@ -0,0 +1,11 @@
+
+
+
TrainImagesClassifier
Brief Description
Train a classifier from multiple pairs of images and training vector data.
Tags
Learning
Long Description
This application performs a classifier training from multiple pairs of input images and training vector data. Samples are composed of pixel values in each band optionally centered and reduced using an XML statistics file produced by the ComputeImagesStatistics application.
+ The training vector data must contain polygons with a positive integer field representing the class label. The name of this field can be set using the "Class label field" parameter. Training and validation sample lists are built such that each class is equally represented in both lists. One parameter controls the ratio between the number of samples in the training and validation sets. Two parameters manage the size of the training and validation sets per class and per image.
+ Several classifier parameters can be set depending on the chosen classifier. In the validation process, the confusion matrix is organized the following way: rows = reference labels, columns = produced labels. In the header of the optional confusion matrix output file, the validation (reference) and predicted (produced) class labels are ordered according to the rows/columns of the confusion matrix.
+ This application is based on LibSVM and on OpenCV Machine Learning classifiers, and is compatible with OpenCV 2.3.1 and later.
Parameters
[param] -io <string> This group of parameters allows to set input and output data.. Mandatory: True. Default Value: "0"
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -sample <string> This group of parameters allows to set training and validation sample lists parameters.. Mandatory: True. Default Value: "0"
[param] -rand <int32> Set a specific seed with an integer value.. Mandatory: False. Default Value: "0"
[choice] -classifier Choice of the classifier to use for the training. libsvm,svm,boost,dt,gbt,ann,bayes,rf,knn. Mandatory: True. Default Value: "libsvm"
[param] -classifier.libsvm.c <float> SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.. Mandatory: True. Default Value: "1"
[param] -classifier.svm.c <float> SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.. Mandatory: True. Default Value: "1"
[param] -classifier.svm.nu <float> Parameter nu of a SVM optimization problem.. Mandatory: True. Default Value: "0"
[param] -classifier.svm.coef0 <float> Parameter coef0 of a kernel function (POLY / SIGMOID).. Mandatory: True. Default Value: "0"
[param] -classifier.svm.gamma <float> Parameter gamma of a kernel function (POLY / RBF / SIGMOID).. Mandatory: True. Default Value: "1"
[param] -classifier.svm.degree <float> Parameter degree of a kernel function (POLY).. Mandatory: True. Default Value: "1"
[param] -classifier.svm.opt <boolean> SVM parameters optimization flag.
+-If set to True, then the optimal SVM parameters will be estimated. Parameters are considered optimal by OpenCV when the cross-validation estimate of the test set error is minimal. Finally, the SVM training process is computed 10 times with these optimal parameters over subsets corresponding to 1/10th of the training samples using the k-fold cross-validation (with k = 10).
+-If set to False, the SVM classification process will be computed once with the currently set input SVM parameters over the training samples.
+-Thus, even with identical input SVM parameters and a similar random seed, the output SVM models will be different according to the method used (optimized or not) because the samples are not identically processed within OpenCV.. Mandatory: False. Default Value: "True"
[group] -boost
[param] -classifier.boost.t <string> Type of Boosting algorithm.. Mandatory: True. Default Value: "real"
[param] -classifier.boost.w <int32> The number of weak classifiers.. Mandatory: True. Default Value: "100"
[param] -classifier.boost.r <float> A threshold between 0 and 1 used to save computational time. Samples with summary weight <= (1 - weight_trim_rate) do not participate in the next iteration of training. Set this parameter to 0 to turn off this functionality.. Mandatory: True. Default Value: "0.95"
[param] -classifier.boost.m <int32> Maximum depth of the tree.. Mandatory: True. Default Value: "1"
[group] -dt
[param] -classifier.dt.max <int32> The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.. Mandatory: True. Default Value: "65535"
[param] -classifier.dt.min <int32> If all absolute differences between an estimated value in a node and the values of the train samples in this node are smaller than this regression accuracy parameter, then the node will not be split.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.f <int32> If cv_folds > 1, then it prunes a tree with K-fold cross-validation where K is equal to cv_folds.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.r <boolean> If true, then a pruning will be harsher. This will make a tree more compact and more resistant to the training data noise but a bit less accurate.. Mandatory: False. Default Value: "True"
[param] -classifier.dt.t <boolean> If true, then pruned branches are physically removed from the tree.. Mandatory: False. Default Value: "True"
[group] -gbt
[param] -classifier.gbt.w <int32> Number "w" of boosting algorithm iterations, with w*K being the total number of trees in the GBT model, where K is the output number of classes.. Mandatory: True. Default Value: "200"
[param] -classifier.gbt.p <float> Portion of the whole training set used for each algorithm iteration. The subset is generated randomly.. Mandatory: True. Default Value: "0.8"
[param] -classifier.gbt.max <int32> The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.. Mandatory: True. Default Value: "3"
[group] -ann
[param] -classifier.ann.t <string> Type of training method for the multilayer perceptron (MLP) neural network.. Mandatory: True. Default Value: "reg"
[param] -classifier.ann.sizes <string> The number of neurons in each intermediate layer (excluding input and output layers).. Mandatory: True. Default Value: ""
[param] -classifier.ann.a <float> Alpha parameter of the activation function (used only with sigmoid and gaussian functions).. Mandatory: True. Default Value: "1"
[param] -classifier.ann.b <float> Beta parameter of the activation function (used only with sigmoid and gaussian functions).. Mandatory: True. Default Value: "1"
[param] -classifier.ann.bpdw <float> Strength of the weight gradient term in the BACKPROP method. The recommended value is about 0.1.. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.bpms <float> Strength of the momentum term (the difference between weights on the 2 previous iterations). This parameter provides some inertia to smooth the random fluctuations of the weights. It can vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough.. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.rdw <float> Initial value Delta_0 of update-values Delta_{ij} in RPROP method (default = 0.1).. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.rdwm <float> Update-values lower limit Delta_{min} in RPROP method. It must be positive (default = 1e-7).. Mandatory: True. Default Value: "1e-07"
[param] -classifier.ann.eps <float> Epsilon value used in the Termination criteria.. Mandatory: True. Default Value: "0.01"
[param] -classifier.ann.iter <int32> Maximum number of iterations used in the Termination criteria.. Mandatory: True. Default Value: "1000"
[group] -bayes
[group] -rf
[param] -classifier.rf.max <int32> The depth of the tree. A low value will likely underfit and conversely a high value will likely overfit. The optimal value can be obtained using cross validation or other suitable methods.. Mandatory: True. Default Value: "5"
[param] -classifier.rf.min <int32> If the number of samples in a node is smaller than this parameter, then the node will not be split. A reasonable value is a small percentage of the total data e.g. 1 percent.. Mandatory: True. Default Value: "10"
[param] -classifier.rf.ra <float> If all absolute differences between an estimated value in a node and the values of the train samples in this node are smaller than this regression accuracy parameter, then the node will not be split.. Mandatory: True. Default Value: "0"
[param] -classifier.rf.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.. Mandatory: True. Default Value: "10"
[param] -classifier.rf.var <int32> The size of the subset of features, randomly selected at each tree node, that are used to find the best split(s). If you set it to 0, then the size will be set to the square root of the total number of features.. Mandatory: True. Default Value: "0"
[param] -classifier.rf.nbtrees <int32> The maximum number of trees in the forest. Typically, the more trees you have, the better the accuracy. However, the improvement in accuracy generally diminishes and reaches an asymptote for a certain number of trees. Also to keep in mind, increasing the number of trees increases the prediction time linearly.. Mandatory: True. Default Value: "100"
[param] -classifier.knn.k <int32> The number of neighbors to use.. Mandatory: True. Default Value: "32"
Limitations
None
Authors
OTB-Team
See Also
OpenCV documentation for machine learning http://docs.opencv.org/modules/ml/doc/ml.html
Example of use
io.il: QB_1_ortho.tif
io.vd: VectorData_QB1.shp
io.imstat: EstimateImageStatisticsQB1.xml
sample.mv: 100
sample.mt: 100
sample.vtr: 0.5
sample.edg: false
sample.vfn: Class
classifier: libsvm
classifier.libsvm.k: linear
classifier.libsvm.c: 1
classifier.libsvm.opt: false
io.out: svmModelQB1.txt
io.confmatout: svmConfusionMatrixQB1.csv
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/TrainImagesClassifier-bayes.html b/python/plugins/processing/otb/description/doc/TrainImagesClassifier-bayes.html
new file mode 100644
index 000000000000..f50791258f5d
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/TrainImagesClassifier-bayes.html
@@ -0,0 +1,11 @@
+
+
+
TrainImagesClassifier
Brief Description
Train a classifier from multiple pairs of images and training vector data.
Tags
Learning
Long Description
This application performs a classifier training from multiple pairs of input images and training vector data. Samples are composed of pixel values in each band optionally centered and reduced using an XML statistics file produced by the ComputeImagesStatistics application.
+ The training vector data must contain polygons with a positive integer field representing the class label. The name of this field can be set using the "Class label field" parameter. Training and validation sample lists are built such that each class is equally represented in both lists. One parameter controls the ratio between the number of samples in the training and validation sets. Two parameters manage the size of the training and validation sets per class and per image.
+ Several classifier parameters can be set depending on the chosen classifier. In the validation process, the confusion matrix is organized the following way: rows = reference labels, columns = produced labels. In the header of the optional confusion matrix output file, the validation (reference) and predicted (produced) class labels are ordered according to the rows/columns of the confusion matrix.
+ This application is based on LibSVM and on OpenCV Machine Learning classifiers, and is compatible with OpenCV 2.3.1 and later.
Parameters
[param] -io <string> This group of parameters allows to set input and output data.. Mandatory: True. Default Value: "0"
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -sample <string> This group of parameters allows to set training and validation sample lists parameters.. Mandatory: True. Default Value: "0"
[param] -rand <int32> Set a specific seed with an integer value.. Mandatory: False. Default Value: "0"
[choice] -classifier Choice of the classifier to use for the training. libsvm,svm,boost,dt,gbt,ann,bayes,rf,knn. Mandatory: True. Default Value: "libsvm"
[param] -classifier.libsvm.c <float> SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.. Mandatory: True. Default Value: "1"
[param] -classifier.svm.c <float> SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.. Mandatory: True. Default Value: "1"
[param] -classifier.svm.nu <float> Parameter nu of a SVM optimization problem.. Mandatory: True. Default Value: "0"
[param] -classifier.svm.coef0 <float> Parameter coef0 of a kernel function (POLY / SIGMOID).. Mandatory: True. Default Value: "0"
[param] -classifier.svm.gamma <float> Parameter gamma of a kernel function (POLY / RBF / SIGMOID).. Mandatory: True. Default Value: "1"
[param] -classifier.svm.degree <float> Parameter degree of a kernel function (POLY).. Mandatory: True. Default Value: "1"
[param] -classifier.svm.opt <boolean> SVM parameters optimization flag.
+-If set to True, then the optimal SVM parameters will be estimated. Parameters are considered optimal by OpenCV when the cross-validation estimate of the test set error is minimal. Finally, the SVM training process is computed 10 times with these optimal parameters over subsets corresponding to 1/10th of the training samples using the k-fold cross-validation (with k = 10).
+-If set to False, the SVM classification process will be computed once with the currently set input SVM parameters over the training samples.
+-Thus, even with identical input SVM parameters and a similar random seed, the output SVM models will be different according to the method used (optimized or not) because the samples are not identically processed within OpenCV.. Mandatory: False. Default Value: "True"
[group] -boost
[param] -classifier.boost.t <string> Type of Boosting algorithm.. Mandatory: True. Default Value: "real"
[param] -classifier.boost.w <int32> The number of weak classifiers.. Mandatory: True. Default Value: "100"
[param] -classifier.boost.r <float> A threshold between 0 and 1 used to save computational time. Samples with summary weight <= (1 - weight_trim_rate) do not participate in the next iteration of training. Set this parameter to 0 to turn off this functionality.. Mandatory: True. Default Value: "0.95"
[param] -classifier.boost.m <int32> Maximum depth of the tree.. Mandatory: True. Default Value: "1"
[group] -dt
[param] -classifier.dt.max <int32> The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.. Mandatory: True. Default Value: "65535"
[param] -classifier.dt.min <int32> If all absolute differences between an estimated value in a node and the values of the train samples in this node are smaller than this regression accuracy parameter, then the node will not be split.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.f <int32> If cv_folds > 1, then it prunes a tree with K-fold cross-validation where K is equal to cv_folds.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.r <boolean> If true, then a pruning will be harsher. This will make a tree more compact and more resistant to the training data noise but a bit less accurate.. Mandatory: False. Default Value: "True"
[param] -classifier.dt.t <boolean> If true, then pruned branches are physically removed from the tree.. Mandatory: False. Default Value: "True"
[group] -gbt
[param] -classifier.gbt.w <int32> Number "w" of boosting algorithm iterations, with w*K being the total number of trees in the GBT model, where K is the output number of classes.. Mandatory: True. Default Value: "200"
[param] -classifier.gbt.p <float> Portion of the whole training set used for each algorithm iteration. The subset is generated randomly.. Mandatory: True. Default Value: "0.8"
[param] -classifier.gbt.max <int32> The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.. Mandatory: True. Default Value: "3"
[group] -ann
[param] -classifier.ann.t <string> Type of training method for the multilayer perceptron (MLP) neural network.. Mandatory: True. Default Value: "reg"
[param] -classifier.ann.sizes <string> The number of neurons in each intermediate layer (excluding input and output layers).. Mandatory: True. Default Value: ""
[param] -classifier.ann.a <float> Alpha parameter of the activation function (used only with sigmoid and gaussian functions).. Mandatory: True. Default Value: "1"
[param] -classifier.ann.b <float> Beta parameter of the activation function (used only with sigmoid and gaussian functions).. Mandatory: True. Default Value: "1"
[param] -classifier.ann.bpdw <float> Strength of the weight gradient term in the BACKPROP method. The recommended value is about 0.1.. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.bpms <float> Strength of the momentum term (the difference between weights on the 2 previous iterations). This parameter provides some inertia to smooth the random fluctuations of the weights. It can vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough.. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.rdw <float> Initial value Delta_0 of update-values Delta_{ij} in RPROP method (default = 0.1).. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.rdwm <float> Update-values lower limit Delta_{min} in RPROP method. It must be positive (default = 1e-7).. Mandatory: True. Default Value: "1e-07"
[param] -classifier.ann.eps <float> Epsilon value used in the Termination criteria.. Mandatory: True. Default Value: "0.01"
[param] -classifier.ann.iter <int32> Maximum number of iterations used in the Termination criteria.. Mandatory: True. Default Value: "1000"
[group] -bayes
[group] -rf
[param] -classifier.rf.max <int32> The depth of the tree. A low value will likely underfit and conversely a high value will likely overfit. The optimal value can be obtained using cross validation or other suitable methods.. Mandatory: True. Default Value: "5"
[param] -classifier.rf.min <int32> If the number of samples in a node is smaller than this parameter, then the node will not be split. A reasonable value is a small percentage of the total data e.g. 1 percent.. Mandatory: True. Default Value: "10"
[param] -classifier.rf.ra <float> If all absolute differences between an estimated value in a node and the values of the train samples in this node are smaller than this regression accuracy parameter, then the node will not be split.. Mandatory: True. Default Value: "0"
[param] -classifier.rf.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.. Mandatory: True. Default Value: "10"
[param] -classifier.rf.var <int32> The size of the subset of features, randomly selected at each tree node, that are used to find the best split(s). If you set it to 0, then the size will be set to the square root of the total number of features.. Mandatory: True. Default Value: "0"
[param] -classifier.rf.nbtrees <int32> The maximum number of trees in the forest. Typically, the more trees you have, the better the accuracy. However, the improvement in accuracy generally diminishes and reaches an asymptote for a certain number of trees. Also to keep in mind, increasing the number of trees increases the prediction time linearly.. Mandatory: True. Default Value: "100"
[param] -classifier.knn.k <int32> The number of neighbors to use.. Mandatory: True. Default Value: "32"
Limitations
None
Authors
OTB-Team
See Also
OpenCV documentation for machine learning http://docs.opencv.org/modules/ml/doc/ml.html
Example of use
io.il: QB_1_ortho.tif
io.vd: VectorData_QB1.shp
io.imstat: EstimateImageStatisticsQB1.xml
sample.mv: 100
sample.mt: 100
sample.vtr: 0.5
sample.edg: false
sample.vfn: Class
classifier: libsvm
classifier.libsvm.k: linear
classifier.libsvm.c: 1
classifier.libsvm.opt: false
io.out: svmModelQB1.txt
io.confmatout: svmConfusionMatrixQB1.csv
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/TrainImagesClassifier-boost.html b/python/plugins/processing/otb/description/doc/TrainImagesClassifier-boost.html
new file mode 100644
index 000000000000..f50791258f5d
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/TrainImagesClassifier-boost.html
@@ -0,0 +1,11 @@
+
+
+
TrainImagesClassifier
Brief Description
Train a classifier from multiple pairs of images and training vector data.
Tags
Learning
Long Description
This application performs a classifier training from multiple pairs of input images and training vector data. Samples are composed of pixel values in each band optionally centered and reduced using an XML statistics file produced by the ComputeImagesStatistics application.
+ The training vector data must contain polygons with a positive integer field representing the class label. The name of this field can be set using the "Class label field" parameter. Training and validation sample lists are built such that each class is equally represented in both lists. One parameter controls the ratio between the number of samples in the training and validation sets. Two parameters manage the size of the training and validation sets per class and per image.
+ Several classifier parameters can be set depending on the chosen classifier. In the validation process, the confusion matrix is organized the following way: rows = reference labels, columns = produced labels. In the header of the optional confusion matrix output file, the validation (reference) and predicted (produced) class labels are ordered according to the rows/columns of the confusion matrix.
+ This application is based on LibSVM and on OpenCV Machine Learning classifiers, and is compatible with OpenCV 2.3.1 and later.
Parameters
[param] -io <string> This group of parameters allows to set input and output data.. Mandatory: True. Default Value: "0"
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -sample <string> This group of parameters allows to set training and validation sample lists parameters.. Mandatory: True. Default Value: "0"
[param] -rand <int32> Set a specific seed with an integer value.. Mandatory: False. Default Value: "0"
[choice] -classifier Choice of the classifier to use for the training. libsvm,svm,boost,dt,gbt,ann,bayes,rf,knn. Mandatory: True. Default Value: "libsvm"
[param] -classifier.libsvm.c <float> SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.. Mandatory: True. Default Value: "1"
[param] -classifier.svm.c <float> SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.. Mandatory: True. Default Value: "1"
[param] -classifier.svm.nu <float> Parameter nu of a SVM optimization problem.. Mandatory: True. Default Value: "0"
[param] -classifier.svm.coef0 <float> Parameter coef0 of a kernel function (POLY / SIGMOID).. Mandatory: True. Default Value: "0"
[param] -classifier.svm.gamma <float> Parameter gamma of a kernel function (POLY / RBF / SIGMOID).. Mandatory: True. Default Value: "1"
[param] -classifier.svm.degree <float> Parameter degree of a kernel function (POLY).. Mandatory: True. Default Value: "1"
[param] -classifier.svm.opt <boolean> SVM parameters optimization flag.
+-If set to True, then the optimal SVM parameters will be estimated. Parameters are considered optimal by OpenCV when the cross-validation estimate of the test set error is minimal. Finally, the SVM training process is computed 10 times with these optimal parameters over subsets corresponding to 1/10th of the training samples using the k-fold cross-validation (with k = 10).
+-If set to False, the SVM classification process will be computed once with the currently set input SVM parameters over the training samples.
+-Thus, even with identical input SVM parameters and a similar random seed, the output SVM models will be different according to the method used (optimized or not) because the samples are not identically processed within OpenCV.. Mandatory: False. Default Value: "True"
[group] -boost
[param] -classifier.boost.t <string> Type of Boosting algorithm.. Mandatory: True. Default Value: "real"
[param] -classifier.boost.w <int32> The number of weak classifiers.. Mandatory: True. Default Value: "100"
[param] -classifier.boost.r <float> A threshold between 0 and 1 used to save computational time. Samples with summary weight <= (1 - weight_trim_rate) do not participate in the next iteration of training. Set this parameter to 0 to turn off this functionality.. Mandatory: True. Default Value: "0.95"
[param] -classifier.boost.m <int32> Maximum depth of the tree.. Mandatory: True. Default Value: "1"
[group] -dt
[param] -classifier.dt.max <int32> The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.. Mandatory: True. Default Value: "65535"
[param] -classifier.dt.min <int32> If all absolute differences between an estimated value in a node and the values of the train samples in this node are smaller than this regression accuracy parameter, then the node will not be split.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.f <int32> If cv_folds > 1, then it prunes a tree with K-fold cross-validation where K is equal to cv_folds.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.r <boolean> If true, then a pruning will be harsher. This will make a tree more compact and more resistant to the training data noise but a bit less accurate.. Mandatory: False. Default Value: "True"
[param] -classifier.dt.t <boolean> If true, then pruned branches are physically removed from the tree.. Mandatory: False. Default Value: "True"
[group] -gbt
[param] -classifier.gbt.w <int32> Number "w" of boosting algorithm iterations, with w*K being the total number of trees in the GBT model, where K is the output number of classes.. Mandatory: True. Default Value: "200"
[param] -classifier.gbt.p <float> Portion of the whole training set used for each algorithm iteration. The subset is generated randomly.. Mandatory: True. Default Value: "0.8"
[param] -classifier.gbt.max <int32> The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.. Mandatory: True. Default Value: "3"
[group] -ann
[param] -classifier.ann.t <string> Type of training method for the multilayer perceptron (MLP) neural network.. Mandatory: True. Default Value: "reg"
[param] -classifier.ann.sizes <string> The number of neurons in each intermediate layer (excluding input and output layers).. Mandatory: True. Default Value: ""
[param] -classifier.ann.a <float> Alpha parameter of the activation function (used only with sigmoid and gaussian functions).. Mandatory: True. Default Value: "1"
[param] -classifier.ann.b <float> Beta parameter of the activation function (used only with sigmoid and gaussian functions).. Mandatory: True. Default Value: "1"
[param] -classifier.ann.bpdw <float> Strength of the weight gradient term in the BACKPROP method. The recommended value is about 0.1.. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.bpms <float> Strength of the momentum term (the difference between weights on the 2 previous iterations). This parameter provides some inertia to smooth the random fluctuations of the weights. It can vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough.. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.rdw <float> Initial value Delta_0 of update-values Delta_{ij} in RPROP method (default = 0.1).. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.rdwm <float> Update-values lower limit Delta_{min} in RPROP method. It must be positive (default = 1e-7).. Mandatory: True. Default Value: "1e-07"
[param] -classifier.ann.eps <float> Epsilon value used in the Termination criteria.. Mandatory: True. Default Value: "0.01"
[param] -classifier.ann.iter <int32> Maximum number of iterations used in the Termination criteria.. Mandatory: True. Default Value: "1000"
[group] -bayes
[group] -rf
[param] -classifier.rf.max <int32> The depth of the tree. A low value will likely underfit and conversely a high value will likely overfit. The optimal value can be obtained using cross validation or other suitable methods.. Mandatory: True. Default Value: "5"
[param] -classifier.rf.min <int32> If the number of samples in a node is smaller than this parameter, then the node will not be split. A reasonable value is a small percentage of the total data e.g. 1 percent.. Mandatory: True. Default Value: "10"
[param] -classifier.rf.ra <float> If all absolute differences between an estimated value in a node and the values of the train samples in this node are smaller than this regression accuracy parameter, then the node will not be split.. Mandatory: True. Default Value: "0"
[param] -classifier.rf.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.. Mandatory: True. Default Value: "10"
[param] -classifier.rf.var <int32> The size of the subset of features, randomly selected at each tree node, that are used to find the best split(s). If you set it to 0, then the size will be set to the square root of the total number of features.. Mandatory: True. Default Value: "0"
[param] -classifier.rf.nbtrees <int32> The maximum number of trees in the forest. Typically, the more trees you have, the better the accuracy. However, the improvement in accuracy generally diminishes and reaches an asymptote for a certain number of trees. Also to keep in mind, increasing the number of trees increases the prediction time linearly.. Mandatory: True. Default Value: "100"
[param] -classifier.knn.k <int32> The number of neighbors to use.. Mandatory: True. Default Value: "32"
Limitations
None
Authors
OTB-Team
See Also
OpenCV documentation for machine learning http://docs.opencv.org/modules/ml/doc/ml.html
Example of use
io.il: QB_1_ortho.tif
io.vd: VectorData_QB1.shp
io.imstat: EstimateImageStatisticsQB1.xml
sample.mv: 100
sample.mt: 100
sample.vtr: 0.5
sample.edg: false
sample.vfn: Class
classifier: libsvm
classifier.libsvm.k: linear
classifier.libsvm.c: 1
classifier.libsvm.opt: false
io.out: svmModelQB1.txt
io.confmatout: svmConfusionMatrixQB1.csv
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/TrainImagesClassifier-dt.html b/python/plugins/processing/otb/description/doc/TrainImagesClassifier-dt.html
new file mode 100644
index 000000000000..f50791258f5d
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/TrainImagesClassifier-dt.html
@@ -0,0 +1,11 @@
+
+
+
TrainImagesClassifier
Brief Description
Train a classifier from multiple pairs of images and training vector data.
Tags
Learning
Long Description
This application performs a classifier training from multiple pairs of input images and training vector data. Samples are composed of pixel values in each band optionally centered and reduced using an XML statistics file produced by the ComputeImagesStatistics application.
+ The training vector data must contain polygons with a positive integer field representing the class label. The name of this field can be set using the "Class label field" parameter. Training and validation sample lists are built such that each class is equally represented in both lists. One parameter allows to control the ratio between the number of samples in training and validation sets. Two parameters allow to manage the size of the training and validation sets per class and per image.
+ Several classifier parameters can be set depending on the chosen classifier. In the validation process, the confusion matrix is organized the following way: rows = reference labels, columns = produced labels. In the header of the optional confusion matrix output file, the validation (reference) and predicted (produced) class labels are ordered according to the rows/columns of the confusion matrix.
+ This application is based on LibSVM and on OpenCV Machine Learning classifiers, and is compatible with OpenCV 2.3.1 and later.
Parameters
[param] -io <string> This group of parameters allows to set input and output data.. Mandatory: True. Default Value: "0"
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -sample <string> This group of parameters allows to set training and validation sample lists parameters.. Mandatory: True. Default Value: "0"
[param] -rand <int32> Set specific seed. with integer value.. Mandatory: False. Default Value: "0"
[choice] -classifier Choice of the classifier to use for the training. libsvm,svm,boost,dt,gbt,ann,bayes,rf,knn. Mandatory: True. Default Value: "libsvm"
[param] -classifier.libsvm.c <float> SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.. Mandatory: True. Default Value: "1"
[param] -classifier.svm.c <float> SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.. Mandatory: True. Default Value: "1"
[param] -classifier.svm.nu <float> Parameter nu of a SVM optimization problem.. Mandatory: True. Default Value: "0"
[param] -classifier.svm.coef0 <float> Parameter coef0 of a kernel function (POLY / SIGMOID).. Mandatory: True. Default Value: "0"
[param] -classifier.svm.gamma <float> Parameter gamma of a kernel function (POLY / RBF / SIGMOID).. Mandatory: True. Default Value: "1"
[param] -classifier.svm.degree <float> Parameter degree of a kernel function (POLY).. Mandatory: True. Default Value: "1"
[param] -classifier.svm.opt <boolean> SVM parameters optimization flag.
+-If set to True, then the optimal SVM parameters will be estimated. Parameters are considered optimal by OpenCV when the cross-validation estimate of the test set error is minimal. Finally, the SVM training process is computed 10 times with these optimal parameters over subsets corresponding to 1/10th of the training samples using the k-fold cross-validation (with k = 10).
+-If set to False, the SVM classification process will be computed once with the currently set input SVM parameters over the training samples.
+-Thus, even with identical input SVM parameters and a similar random seed, the output SVM models will be different according to the method used (optimized or not) because the samples are not identically processed within OpenCV.. Mandatory: False. Default Value: "True"
[group] -boost
[param] -classifier.boost.t <string> Type of Boosting algorithm.. Mandatory: True. Default Value: "real"
[param] -classifier.boost.w <int32> The number of weak classifiers.. Mandatory: True. Default Value: "100"
[param] -classifier.boost.r <float> A threshold between 0 and 1 used to save computational time. Samples with summary weight <= (1 - weight_trim_rate) do not participate in the next iteration of training. Set this parameter to 0 to turn off this functionality.. Mandatory: True. Default Value: "0.95"
[param] -classifier.boost.m <int32> Maximum depth of the tree.. Mandatory: True. Default Value: "1"
[group] -dt
[param] -classifier.dt.max <int32> The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.. Mandatory: True. Default Value: "65535"
[param] -classifier.dt.min <int32> If all absolute differences between an estimated value in a node and the values of the train samples in this node are smaller than this regression accuracy parameter, then the node will not be split.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.f <int32> If cv_folds > 1, then it prunes a tree with K-fold cross-validation where K is equal to cv_folds.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.r <boolean> If true, then a pruning will be harsher. This will make a tree more compact and more resistant to the training data noise but a bit less accurate.. Mandatory: False. Default Value: "True"
[param] -classifier.dt.t <boolean> If true, then pruned branches are physically removed from the tree.. Mandatory: False. Default Value: "True"
[group] -gbt
[param] -classifier.gbt.w <int32> Number "w" of boosting algorithm iterations, with w*K being the total number of trees in the GBT model, where K is the output number of classes.. Mandatory: True. Default Value: "200"
[param] -classifier.gbt.p <float> Portion of the whole training set used for each algorithm iteration. The subset is generated randomly.. Mandatory: True. Default Value: "0.8"
[param] -classifier.gbt.max <int32> The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.. Mandatory: True. Default Value: "3"
[group] -ann
[param] -classifier.ann.t <string> Type of training method for the multilayer perceptron (MLP) neural network.. Mandatory: True. Default Value: "reg"
[param] -classifier.ann.sizes <string> The number of neurons in each intermediate layer (excluding input and output layers).. Mandatory: True. Default Value: ""
[param] -classifier.ann.a <float> Alpha parameter of the activation function (used only with sigmoid and gaussian functions).. Mandatory: True. Default Value: "1"
[param] -classifier.ann.b <float> Beta parameter of the activation function (used only with sigmoid and gaussian functions).. Mandatory: True. Default Value: "1"
[param] -classifier.ann.bpdw <float> Strength of the weight gradient term in the BACKPROP method. The recommended value is about 0.1.. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.bpms <float> Strength of the momentum term (the difference between weights on the 2 previous iterations). This parameter provides some inertia to smooth the random fluctuations of the weights. It can vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough.. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.rdw <float> Initial value Delta_0 of update-values Delta_{ij} in RPROP method (default = 0.1).. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.rdwm <float> Update-values lower limit Delta_{min} in RPROP method. It must be positive (default = 1e-7).. Mandatory: True. Default Value: "1e-07"
[param] -classifier.ann.eps <float> Epsilon value used in the Termination criteria.. Mandatory: True. Default Value: "0.01"
[param] -classifier.ann.iter <int32> Maximum number of iterations used in the Termination criteria.. Mandatory: True. Default Value: "1000"
[group] -bayes
[group] -rf
[param] -classifier.rf.max <int32> The depth of the tree. A low value will likely underfit and conversely a high value will likely overfit. The optimal value can be obtained using cross validation or other suitable methods.. Mandatory: True. Default Value: "5"
[param] -classifier.rf.min <int32> If the number of samples in a node is smaller than this parameter, then the node will not be split. A reasonable value is a small percentage of the total data e.g. 1 percent.. Mandatory: True. Default Value: "10"
[param] -classifier.rf.ra <float> If all absolute differences between an estimated value in a node and the values of the train samples in this node are smaller than this regression accuracy parameter, then the node will not be split.. Mandatory: True. Default Value: "0"
[param] -classifier.rf.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.. Mandatory: True. Default Value: "10"
[param] -classifier.rf.var <int32> The size of the subset of features, randomly selected at each tree node, that are used to find the best split(s). If you set it to 0, then the size will be set to the square root of the total number of features.. Mandatory: True. Default Value: "0"
[param] -classifier.rf.nbtrees <int32> The maximum number of trees in the forest. Typically, the more trees you have, the better the accuracy. However, the improvement in accuracy generally diminishes and reaches an asymptote for a certain number of trees. Also to keep in mind, increasing the number of trees increases the prediction time linearly.. Mandatory: True. Default Value: "100"
[param] -classifier.knn.k <int32> The number of neighbors to use.. Mandatory: True. Default Value: "32"
Limitations
None
Authors
OTB-Team
See Also
OpenCV documentation for machine learning http://docs.opencv.org/modules/ml/doc/ml.html
Example of use
io.il: QB_1_ortho.tif
io.vd: VectorData_QB1.shp
io.imstat: EstimateImageStatisticsQB1.xml
sample.mv: 100
sample.mt: 100
sample.vtr: 0.5
sample.edg: false
sample.vfn: Class
classifier: libsvm
classifier.libsvm.k: linear
classifier.libsvm.c: 1
classifier.libsvm.opt: false
io.out: svmModelQB1.txt
io.confmatout: svmConfusionMatrixQB1.csv
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/TrainImagesClassifier-gbt.html b/python/plugins/processing/otb/description/doc/TrainImagesClassifier-gbt.html
new file mode 100644
index 000000000000..f50791258f5d
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/TrainImagesClassifier-gbt.html
@@ -0,0 +1,11 @@
+
+
+
TrainImagesClassifier
Brief Description
Train a classifier from multiple pairs of images and training vector data.
Tags
Learning
Long Description
This application performs a classifier training from multiple pairs of input images and training vector data. Samples are composed of pixel values in each band optionally centered and reduced using an XML statistics file produced by the ComputeImagesStatistics application.
+ The training vector data must contain polygons with a positive integer field representing the class label. The name of this field can be set using the "Class label field" parameter. Training and validation sample lists are built such that each class is equally represented in both lists. One parameter allows to control the ratio between the number of samples in training and validation sets. Two parameters allow to manage the size of the training and validation sets per class and per image.
+ Several classifier parameters can be set depending on the chosen classifier. In the validation process, the confusion matrix is organized the following way: rows = reference labels, columns = produced labels. In the header of the optional confusion matrix output file, the validation (reference) and predicted (produced) class labels are ordered according to the rows/columns of the confusion matrix.
+ This application is based on LibSVM and on OpenCV Machine Learning classifiers, and is compatible with OpenCV 2.3.1 and later.
Parameters
[param] -io <string> This group of parameters allows to set input and output data.. Mandatory: True. Default Value: "0"
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -sample <string> This group of parameters allows to set training and validation sample lists parameters.. Mandatory: True. Default Value: "0"
[param] -rand <int32> Set specific seed. with integer value.. Mandatory: False. Default Value: "0"
[choice] -classifier Choice of the classifier to use for the training. libsvm,svm,boost,dt,gbt,ann,bayes,rf,knn. Mandatory: True. Default Value: "libsvm"
[param] -classifier.libsvm.c <float> SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.. Mandatory: True. Default Value: "1"
[param] -classifier.svm.c <float> SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.. Mandatory: True. Default Value: "1"
[param] -classifier.svm.nu <float> Parameter nu of a SVM optimization problem.. Mandatory: True. Default Value: "0"
[param] -classifier.svm.coef0 <float> Parameter coef0 of a kernel function (POLY / SIGMOID).. Mandatory: True. Default Value: "0"
[param] -classifier.svm.gamma <float> Parameter gamma of a kernel function (POLY / RBF / SIGMOID).. Mandatory: True. Default Value: "1"
[param] -classifier.svm.degree <float> Parameter degree of a kernel function (POLY).. Mandatory: True. Default Value: "1"
[param] -classifier.svm.opt <boolean> SVM parameters optimization flag.
+-If set to True, then the optimal SVM parameters will be estimated. Parameters are considered optimal by OpenCV when the cross-validation estimate of the test set error is minimal. Finally, the SVM training process is computed 10 times with these optimal parameters over subsets corresponding to 1/10th of the training samples using the k-fold cross-validation (with k = 10).
+-If set to False, the SVM classification process will be computed once with the currently set input SVM parameters over the training samples.
+-Thus, even with identical input SVM parameters and a similar random seed, the output SVM models will be different according to the method used (optimized or not) because the samples are not identically processed within OpenCV.. Mandatory: False. Default Value: "True"
[group] -boost
[param] -classifier.boost.t <string> Type of Boosting algorithm.. Mandatory: True. Default Value: "real"
[param] -classifier.boost.w <int32> The number of weak classifiers.. Mandatory: True. Default Value: "100"
[param] -classifier.boost.r <float> A threshold between 0 and 1 used to save computational time. Samples with summary weight <= (1 - weight_trim_rate) do not participate in the next iteration of training. Set this parameter to 0 to turn off this functionality.. Mandatory: True. Default Value: "0.95"
[param] -classifier.boost.m <int32> Maximum depth of the tree.. Mandatory: True. Default Value: "1"
[group] -dt
[param] -classifier.dt.max <int32> The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.. Mandatory: True. Default Value: "65535"
[param] -classifier.dt.min <int32> If all absolute differences between an estimated value in a node and the values of the train samples in this node are smaller than this regression accuracy parameter, then the node will not be split.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.f <int32> If cv_folds > 1, then it prunes a tree with K-fold cross-validation where K is equal to cv_folds.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.r <boolean> If true, then a pruning will be harsher. This will make a tree more compact and more resistant to the training data noise but a bit less accurate.. Mandatory: False. Default Value: "True"
[param] -classifier.dt.t <boolean> If true, then pruned branches are physically removed from the tree.. Mandatory: False. Default Value: "True"
[group] -gbt
[param] -classifier.gbt.w <int32> Number "w" of boosting algorithm iterations, with w*K being the total number of trees in the GBT model, where K is the output number of classes.. Mandatory: True. Default Value: "200"
[param] -classifier.gbt.p <float> Portion of the whole training set used for each algorithm iteration. The subset is generated randomly.. Mandatory: True. Default Value: "0.8"
[param] -classifier.gbt.max <int32> The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.. Mandatory: True. Default Value: "3"
[group] -ann
[param] -classifier.ann.t <string> Type of training method for the multilayer perceptron (MLP) neural network.. Mandatory: True. Default Value: "reg"
[param] -classifier.ann.sizes <string> The number of neurons in each intermediate layer (excluding input and output layers).. Mandatory: True. Default Value: ""
[param] -classifier.ann.a <float> Alpha parameter of the activation function (used only with sigmoid and gaussian functions).. Mandatory: True. Default Value: "1"
[param] -classifier.ann.b <float> Beta parameter of the activation function (used only with sigmoid and gaussian functions).. Mandatory: True. Default Value: "1"
[param] -classifier.ann.bpdw <float> Strength of the weight gradient term in the BACKPROP method. The recommended value is about 0.1.. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.bpms <float> Strength of the momentum term (the difference between weights on the 2 previous iterations). This parameter provides some inertia to smooth the random fluctuations of the weights. It can vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough.. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.rdw <float> Initial value Delta_0 of update-values Delta_{ij} in RPROP method (default = 0.1).. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.rdwm <float> Update-values lower limit Delta_{min} in RPROP method. It must be positive (default = 1e-7).. Mandatory: True. Default Value: "1e-07"
[param] -classifier.ann.eps <float> Epsilon value used in the Termination criteria.. Mandatory: True. Default Value: "0.01"
[param] -classifier.ann.iter <int32> Maximum number of iterations used in the Termination criteria.. Mandatory: True. Default Value: "1000"
[group] -bayes
[group] -rf
[param] -classifier.rf.max <int32> The depth of the tree. A low value will likely underfit and conversely a high value will likely overfit. The optimal value can be obtained using cross validation or other suitable methods.. Mandatory: True. Default Value: "5"
[param] -classifier.rf.min <int32> If the number of samples in a node is smaller than this parameter, then the node will not be split. A reasonable value is a small percentage of the total data e.g. 1 percent.. Mandatory: True. Default Value: "10"
[param] -classifier.rf.ra <float> If all absolute differences between an estimated value in a node and the values of the train samples in this node are smaller than this regression accuracy parameter, then the node will not be split.. Mandatory: True. Default Value: "0"
[param] -classifier.rf.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.. Mandatory: True. Default Value: "10"
[param] -classifier.rf.var <int32> The size of the subset of features, randomly selected at each tree node, that are used to find the best split(s). If you set it to 0, then the size will be set to the square root of the total number of features.. Mandatory: True. Default Value: "0"
[param] -classifier.rf.nbtrees <int32> The maximum number of trees in the forest. Typically, the more trees you have, the better the accuracy. However, the improvement in accuracy generally diminishes and reaches an asymptote for a certain number of trees. Also to keep in mind, increasing the number of trees increases the prediction time linearly.. Mandatory: True. Default Value: "100"
[param] -classifier.knn.k <int32> The number of neighbors to use.. Mandatory: True. Default Value: "32"
Limitations
None
Authors
OTB-Team
See Also
OpenCV documentation for machine learning http://docs.opencv.org/modules/ml/doc/ml.html
Example of use
io.il: QB_1_ortho.tif
io.vd: VectorData_QB1.shp
io.imstat: EstimateImageStatisticsQB1.xml
sample.mv: 100
sample.mt: 100
sample.vtr: 0.5
sample.edg: false
sample.vfn: Class
classifier: libsvm
classifier.libsvm.k: linear
classifier.libsvm.c: 1
classifier.libsvm.opt: false
io.out: svmModelQB1.txt
io.confmatout: svmConfusionMatrixQB1.csv
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/TrainImagesClassifier-knn.html b/python/plugins/processing/otb/description/doc/TrainImagesClassifier-knn.html
new file mode 100644
index 000000000000..f50791258f5d
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/TrainImagesClassifier-knn.html
@@ -0,0 +1,11 @@
+
+
+
TrainImagesClassifier
Brief Description
Train a classifier from multiple pairs of images and training vector data.
Tags
Learning
Long Description
This application performs a classifier training from multiple pairs of input images and training vector data. Samples are composed of pixel values in each band optionally centered and reduced using an XML statistics file produced by the ComputeImagesStatistics application.
+ The training vector data must contain polygons with a positive integer field representing the class label. The name of this field can be set using the "Class label field" parameter. Training and validation sample lists are built such that each class is equally represented in both lists. One parameter allows to control the ratio between the number of samples in training and validation sets. Two parameters allow to manage the size of the training and validation sets per class and per image.
+ Several classifier parameters can be set depending on the chosen classifier. In the validation process, the confusion matrix is organized the following way: rows = reference labels, columns = produced labels. In the header of the optional confusion matrix output file, the validation (reference) and predicted (produced) class labels are ordered according to the rows/columns of the confusion matrix.
+ This application is based on LibSVM and on OpenCV Machine Learning classifiers, and is compatible with OpenCV 2.3.1 and later.
Parameters
[param] -io <string> This group of parameters allows to set input and output data.. Mandatory: True. Default Value: "0"
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -sample <string> This group of parameters allows to set training and validation sample lists parameters.. Mandatory: True. Default Value: "0"
[param] -rand <int32> Set specific seed. with integer value.. Mandatory: False. Default Value: "0"
[choice] -classifier Choice of the classifier to use for the training. libsvm,svm,boost,dt,gbt,ann,bayes,rf,knn. Mandatory: True. Default Value: "libsvm"
[param] -classifier.libsvm.c <float> SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.. Mandatory: True. Default Value: "1"
[param] -classifier.svm.c <float> SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.. Mandatory: True. Default Value: "1"
[param] -classifier.svm.nu <float> Parameter nu of a SVM optimization problem.. Mandatory: True. Default Value: "0"
[param] -classifier.svm.coef0 <float> Parameter coef0 of a kernel function (POLY / SIGMOID).. Mandatory: True. Default Value: "0"
[param] -classifier.svm.gamma <float> Parameter gamma of a kernel function (POLY / RBF / SIGMOID).. Mandatory: True. Default Value: "1"
[param] -classifier.svm.degree <float> Parameter degree of a kernel function (POLY).. Mandatory: True. Default Value: "1"
[param] -classifier.svm.opt <boolean> SVM parameters optimization flag.
+-If set to True, then the optimal SVM parameters will be estimated. Parameters are considered optimal by OpenCV when the cross-validation estimate of the test set error is minimal. Finally, the SVM training process is computed 10 times with these optimal parameters over subsets corresponding to 1/10th of the training samples using the k-fold cross-validation (with k = 10).
+-If set to False, the SVM classification process will be computed once with the currently set input SVM parameters over the training samples.
+-Thus, even with identical input SVM parameters and a similar random seed, the output SVM models will be different according to the method used (optimized or not) because the samples are not identically processed within OpenCV.. Mandatory: False. Default Value: "True"
[group] -boost
[param] -classifier.boost.t <string> Type of Boosting algorithm.. Mandatory: True. Default Value: "real"
[param] -classifier.boost.w <int32> The number of weak classifiers.. Mandatory: True. Default Value: "100"
[param] -classifier.boost.r <float> A threshold between 0 and 1 used to save computational time. Samples with summary weight <= (1 - weight_trim_rate) do not participate in the next iteration of training. Set this parameter to 0 to turn off this functionality.. Mandatory: True. Default Value: "0.95"
[param] -classifier.boost.m <int32> Maximum depth of the tree.. Mandatory: True. Default Value: "1"
[group] -dt
[param] -classifier.dt.max <int32> The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.. Mandatory: True. Default Value: "65535"
[param] -classifier.dt.min <int32> If all absolute differences between an estimated value in a node and the values of the train samples in this node are smaller than this regression accuracy parameter, then the node will not be split.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.f <int32> If cv_folds > 1, then it prunes a tree with K-fold cross-validation where K is equal to cv_folds.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.r <boolean> If true, then a pruning will be harsher. This will make a tree more compact and more resistant to the training data noise but a bit less accurate.. Mandatory: False. Default Value: "True"
[param] -classifier.dt.t <boolean> If true, then pruned branches are physically removed from the tree.. Mandatory: False. Default Value: "True"
[group] -gbt
[param] -classifier.gbt.w <int32> Number "w" of boosting algorithm iterations, with w*K being the total number of trees in the GBT model, where K is the output number of classes.. Mandatory: True. Default Value: "200"
[param] -classifier.gbt.p <float> Portion of the whole training set used for each algorithm iteration. The subset is generated randomly.. Mandatory: True. Default Value: "0.8"
[param] -classifier.gbt.max <int32> The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.. Mandatory: True. Default Value: "3"
[group] -ann
[param] -classifier.ann.t <string> Type of training method for the multilayer perceptron (MLP) neural network.. Mandatory: True. Default Value: "reg"
[param] -classifier.ann.sizes <string> The number of neurons in each intermediate layer (excluding input and output layers).. Mandatory: True. Default Value: ""
[param] -classifier.ann.a <float> Alpha parameter of the activation function (used only with sigmoid and gaussian functions).. Mandatory: True. Default Value: "1"
[param] -classifier.ann.b <float> Beta parameter of the activation function (used only with sigmoid and gaussian functions).. Mandatory: True. Default Value: "1"
[param] -classifier.ann.bpdw <float> Strength of the weight gradient term in the BACKPROP method. The recommended value is about 0.1.. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.bpms <float> Strength of the momentum term (the difference between weights on the 2 previous iterations). This parameter provides some inertia to smooth the random fluctuations of the weights. It can vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough.. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.rdw <float> Initial value Delta_0 of update-values Delta_{ij} in RPROP method (default = 0.1).. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.rdwm <float> Update-values lower limit Delta_{min} in RPROP method. It must be positive (default = 1e-7).. Mandatory: True. Default Value: "1e-07"
[param] -classifier.ann.eps <float> Epsilon value used in the Termination criteria.. Mandatory: True. Default Value: "0.01"
[param] -classifier.ann.iter <int32> Maximum number of iterations used in the Termination criteria.. Mandatory: True. Default Value: "1000"
[group] -bayes
[group] -rf
[param] -classifier.rf.max <int32> The depth of the tree. A low value will likely underfit and conversely a high value will likely overfit. The optimal value can be obtained using cross validation or other suitable methods.. Mandatory: True. Default Value: "5"
[param] -classifier.rf.min <int32> If the number of samples in a node is smaller than this parameter, then the node will not be split. A reasonable value is a small percentage of the total data e.g. 1 percent.. Mandatory: True. Default Value: "10"
[param] -classifier.rf.ra <float> If all absolute differences between an estimated value in a node and the values of the train samples in this node are smaller than this regression accuracy parameter, then the node will not be split.. Mandatory: True. Default Value: "0"
[param] -classifier.rf.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.. Mandatory: True. Default Value: "10"
[param] -classifier.rf.var <int32> The size of the subset of features, randomly selected at each tree node, that are used to find the best split(s). If you set it to 0, then the size will be set to the square root of the total number of features.. Mandatory: True. Default Value: "0"
[param] -classifier.rf.nbtrees <int32> The maximum number of trees in the forest. Typically, the more trees you have, the better the accuracy. However, the improvement in accuracy generally diminishes and reaches an asymptote for a certain number of trees. Also to keep in mind, increasing the number of trees increases the prediction time linearly.. Mandatory: True. Default Value: "100"
[param] -classifier.knn.k <int32> The number of neighbors to use.. Mandatory: True. Default Value: "32"
Limitations
None
Authors
OTB-Team
See Also
OpenCV documentation for machine learning http://docs.opencv.org/modules/ml/doc/ml.html
Example of use
io.il: QB_1_ortho.tif
io.vd: VectorData_QB1.shp
io.imstat: EstimateImageStatisticsQB1.xml
sample.mv: 100
sample.mt: 100
sample.vtr: 0.5
sample.edg: false
sample.vfn: Class
classifier: libsvm
classifier.libsvm.k: linear
classifier.libsvm.c: 1
classifier.libsvm.opt: false
io.out: svmModelQB1.txt
io.confmatout: svmConfusionMatrixQB1.csv
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/TrainImagesClassifier-libsvm.html b/python/plugins/processing/otb/description/doc/TrainImagesClassifier-libsvm.html
new file mode 100644
index 000000000000..f50791258f5d
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/TrainImagesClassifier-libsvm.html
@@ -0,0 +1,11 @@
+
+
+
TrainImagesClassifier
Brief Description
Train a classifier from multiple pairs of images and training vector data.
Tags
Learning
Long Description
This application performs a classifier training from multiple pairs of input images and training vector data. Samples are composed of pixel values in each band optionally centered and reduced using an XML statistics file produced by the ComputeImagesStatistics application.
+ The training vector data must contain polygons with a positive integer field representing the class label. The name of this field can be set using the "Class label field" parameter. Training and validation sample lists are built such that each class is equally represented in both lists. One parameter allows to control the ratio between the number of samples in training and validation sets. Two parameters allow to manage the size of the training and validation sets per class and per image.
+ Several classifier parameters can be set depending on the chosen classifier. In the validation process, the confusion matrix is organized the following way: rows = reference labels, columns = produced labels. In the header of the optional confusion matrix output file, the validation (reference) and predicted (produced) class labels are ordered according to the rows/columns of the confusion matrix.
+ This application is based on LibSVM and on OpenCV Machine Learning classifiers, and is compatible with OpenCV 2.3.1 and later.
Parameters
[param] -io <string> This group of parameters allows to set input and output data.. Mandatory: True. Default Value: "0"
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application.. Mandatory: True. Default Value: "0"
[param] -sample <string> This group of parameters allows to set training and validation sample lists parameters.. Mandatory: True. Default Value: "0"
[param] -rand <int32> Set a specific seed with integer value.. Mandatory: False. Default Value: "0"
[choice] -classifier Choice of the classifier to use for the training. libsvm,svm,boost,dt,gbt,ann,bayes,rf,knn. Mandatory: True. Default Value: "libsvm"
[param] -classifier.libsvm.c <float> SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.. Mandatory: True. Default Value: "1"
[param] -classifier.svm.c <float> SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.. Mandatory: True. Default Value: "1"
[param] -classifier.svm.nu <float> Parameter nu of a SVM optimization problem.. Mandatory: True. Default Value: "0"
[param] -classifier.svm.coef0 <float> Parameter coef0 of a kernel function (POLY / SIGMOID).. Mandatory: True. Default Value: "0"
[param] -classifier.svm.gamma <float> Parameter gamma of a kernel function (POLY / RBF / SIGMOID).. Mandatory: True. Default Value: "1"
[param] -classifier.svm.degree <float> Parameter degree of a kernel function (POLY).. Mandatory: True. Default Value: "1"
[param] -classifier.svm.opt <boolean> SVM parameters optimization flag.
+-If set to True, then the optimal SVM parameters will be estimated. Parameters are considered optimal by OpenCV when the cross-validation estimate of the test set error is minimal. Finally, the SVM training process is computed 10 times with these optimal parameters over subsets corresponding to 1/10th of the training samples using the k-fold cross-validation (with k = 10).
+-If set to False, the SVM classification process will be computed once with the currently set input SVM parameters over the training samples.
+-Thus, even with identical input SVM parameters and a similar random seed, the output SVM models will be different according to the method used (optimized or not) because the samples are not identically processed within OpenCV.. Mandatory: False. Default Value: "True"
[group] -boost
[param] -classifier.boost.t <string> Type of Boosting algorithm.. Mandatory: True. Default Value: "real"
[param] -classifier.boost.w <int32> The number of weak classifiers.. Mandatory: True. Default Value: "100"
[param] -classifier.boost.r <float> A threshold between 0 and 1 used to save computational time. Samples with summary weight <= (1 - weight_trim_rate) do not participate in the next iteration of training. Set this parameter to 0 to turn off this functionality.. Mandatory: True. Default Value: "0.95"
[param] -classifier.boost.m <int32> Maximum depth of the tree.. Mandatory: True. Default Value: "1"
[group] -dt
[param] -classifier.dt.max <int32> The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.. Mandatory: True. Default Value: "65535"
[param] -classifier.dt.min <int32> If the number of samples in a node is smaller than this parameter, then the node will not be split. A reasonable value is a small percentage of the total data e.g. 1 percent.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.f <int32> If cv_folds > 1, then it prunes a tree with K-fold cross-validation where K is equal to cv_folds.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.r <boolean> If true, then a pruning will be harsher. This will make a tree more compact and more resistant to the training data noise but a bit less accurate.. Mandatory: False. Default Value: "True"
[param] -classifier.dt.t <boolean> If true, then pruned branches are physically removed from the tree.. Mandatory: False. Default Value: "True"
[group] -gbt
[param] -classifier.gbt.w <int32> Number "w" of boosting algorithm iterations, with w*K being the total number of trees in the GBT model, where K is the output number of classes.. Mandatory: True. Default Value: "200"
[param] -classifier.gbt.p <float> Portion of the whole training set used for each algorithm iteration. The subset is generated randomly.. Mandatory: True. Default Value: "0.8"
[param] -classifier.gbt.max <int32> The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.. Mandatory: True. Default Value: "3"
[group] -ann
[param] -classifier.ann.t <string> Type of training method for the multilayer perceptron (MLP) neural network.. Mandatory: True. Default Value: "reg"
[param] -classifier.ann.sizes <string> The number of neurons in each intermediate layer (excluding input and output layers).. Mandatory: True. Default Value: ""
[param] -classifier.ann.a <float> Alpha parameter of the activation function (used only with sigmoid and gaussian functions).. Mandatory: True. Default Value: "1"
[param] -classifier.ann.b <float> Beta parameter of the activation function (used only with sigmoid and gaussian functions).. Mandatory: True. Default Value: "1"
[param] -classifier.ann.bpdw <float> Strength of the weight gradient term in the BACKPROP method. The recommended value is about 0.1.. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.bpms <float> Strength of the momentum term (the difference between weights on the 2 previous iterations). This parameter provides some inertia to smooth the random fluctuations of the weights. It can vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough.. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.rdw <float> Initial value Delta_0 of update-values Delta_{ij} in RPROP method (default = 0.1).. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.rdwm <float> Update-values lower limit Delta_{min} in RPROP method. It must be positive (default = 1e-7).. Mandatory: True. Default Value: "1e-07"
[param] -classifier.ann.eps <float> Epsilon value used in the Termination criteria.. Mandatory: True. Default Value: "0.01"
[param] -classifier.ann.iter <int32> Maximum number of iterations used in the Termination criteria.. Mandatory: True. Default Value: "1000"
[group] -bayes
[group] -rf
[param] -classifier.rf.max <int32> The depth of the tree. A low value will likely underfit and conversely a high value will likely overfit. The optimal value can be obtained using cross validation or other suitable methods.. Mandatory: True. Default Value: "5"
[param] -classifier.rf.min <int32> If the number of samples in a node is smaller than this parameter, then the node will not be split. A reasonable value is a small percentage of the total data e.g. 1 percent.. Mandatory: True. Default Value: "10"
[param] -classifier.rf.ra <float> If all absolute differences between an estimated value in a node and the values of the train samples in this node are smaller than this regression accuracy parameter, then the node will not be split.. Mandatory: True. Default Value: "0"
[param] -classifier.rf.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.. Mandatory: True. Default Value: "10"
[param] -classifier.rf.var <int32> The size of the subset of features, randomly selected at each tree node, that are used to find the best split(s). If you set it to 0, then the size will be set to the square root of the total number of features.. Mandatory: True. Default Value: "0"
[param] -classifier.rf.nbtrees <int32> The maximum number of trees in the forest. Typically, the more trees you have, the better the accuracy. However, the improvement in accuracy generally diminishes and reaches an asymptote for a certain number of trees. Also to keep in mind, increasing the number of trees increases the prediction time linearly.. Mandatory: True. Default Value: "100"
[param] -classifier.knn.k <int32> The number of neighbors to use.. Mandatory: True. Default Value: "32"
Limitations
None
Authors
OTB-Team
See Also
OpenCV documentation for machine learning http://docs.opencv.org/modules/ml/doc/ml.html
Example of use
io.il: QB_1_ortho.tif
io.vd: VectorData_QB1.shp
io.imstat: EstimateImageStatisticsQB1.xml
sample.mv: 100
sample.mt: 100
sample.vtr: 0.5
sample.edg: false
sample.vfn: Class
classifier: libsvm
classifier.libsvm.k: linear
classifier.libsvm.c: 1
classifier.libsvm.opt: false
io.out: svmModelQB1.txt
io.confmatout: svmConfusionMatrixQB1.csv
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/TrainImagesClassifier-rf.html b/python/plugins/processing/otb/description/doc/TrainImagesClassifier-rf.html
new file mode 100644
index 000000000000..f50791258f5d
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/TrainImagesClassifier-rf.html
@@ -0,0 +1,11 @@
+
+
+
TrainImagesClassifier
Brief Description
Train a classifier from multiple pairs of images and training vector data.
Tags
Learning
Long Description
This application performs a classifier training from multiple pairs of input images and training vector data. Samples are composed of pixel values in each band optionally centered and reduced using an XML statistics file produced by the ComputeImagesStatistics application.
+ The training vector data must contain polygons with a positive integer field representing the class label. The name of this field can be set using the "Class label field" parameter. Training and validation sample lists are built such that each class is equally represented in both lists. One parameter allows to control the ratio between the number of samples in training and validation sets. Two parameters allow to manage the size of the training and validation sets per class and per image.
+ Several classifier parameters can be set depending on the chosen classifier. In the validation process, the confusion matrix is organized the following way: rows = reference labels, columns = produced labels. In the header of the optional confusion matrix output file, the validation (reference) and predicted (produced) class labels are ordered according to the rows/columns of the confusion matrix.
+ This application is based on LibSVM and on OpenCV Machine Learning classifiers, and is compatible with OpenCV 2.3.1 and later.
Parameters
[param] -io <string> This group of parameters allows to set input and output data.. Mandatory: True. Default Value: "0"
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application.. Mandatory: True. Default Value: "0"
[param] -sample <string> This group of parameters allows to set training and validation sample lists parameters.. Mandatory: True. Default Value: "0"
[param] -rand <int32> Set a specific seed with integer value.. Mandatory: False. Default Value: "0"
[choice] -classifier Choice of the classifier to use for the training. libsvm,svm,boost,dt,gbt,ann,bayes,rf,knn. Mandatory: True. Default Value: "libsvm"
[param] -classifier.libsvm.c <float> SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.. Mandatory: True. Default Value: "1"
[param] -classifier.svm.c <float> SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.. Mandatory: True. Default Value: "1"
[param] -classifier.svm.nu <float> Parameter nu of a SVM optimization problem.. Mandatory: True. Default Value: "0"
[param] -classifier.svm.coef0 <float> Parameter coef0 of a kernel function (POLY / SIGMOID).. Mandatory: True. Default Value: "0"
[param] -classifier.svm.gamma <float> Parameter gamma of a kernel function (POLY / RBF / SIGMOID).. Mandatory: True. Default Value: "1"
[param] -classifier.svm.degree <float> Parameter degree of a kernel function (POLY).. Mandatory: True. Default Value: "1"
[param] -classifier.svm.opt <boolean> SVM parameters optimization flag.
+-If set to True, then the optimal SVM parameters will be estimated. Parameters are considered optimal by OpenCV when the cross-validation estimate of the test set error is minimal. Finally, the SVM training process is computed 10 times with these optimal parameters over subsets corresponding to 1/10th of the training samples using the k-fold cross-validation (with k = 10).
+-If set to False, the SVM classification process will be computed once with the currently set input SVM parameters over the training samples.
+-Thus, even with identical input SVM parameters and a similar random seed, the output SVM models will be different according to the method used (optimized or not) because the samples are not identically processed within OpenCV.. Mandatory: False. Default Value: "True"
[group] -boost
[param] -classifier.boost.t <string> Type of Boosting algorithm.. Mandatory: True. Default Value: "real"
[param] -classifier.boost.w <int32> The number of weak classifiers.. Mandatory: True. Default Value: "100"
[param] -classifier.boost.r <float> A threshold between 0 and 1 used to save computational time. Samples with summary weight <= (1 - weight_trim_rate) do not participate in the next iteration of training. Set this parameter to 0 to turn off this functionality.. Mandatory: True. Default Value: "0.95"
[param] -classifier.boost.m <int32> Maximum depth of the tree.. Mandatory: True. Default Value: "1"
[group] -dt
[param] -classifier.dt.max <int32> The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.. Mandatory: True. Default Value: "65535"
[param] -classifier.dt.min <int32> If the number of samples in a node is smaller than this parameter, then the node will not be split. A reasonable value is a small percentage of the total data e.g. 1 percent.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.f <int32> If cv_folds > 1, then it prunes a tree with K-fold cross-validation where K is equal to cv_folds.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.r <boolean> If true, then a pruning will be harsher. This will make a tree more compact and more resistant to the training data noise but a bit less accurate.. Mandatory: False. Default Value: "True"
[param] -classifier.dt.t <boolean> If true, then pruned branches are physically removed from the tree.. Mandatory: False. Default Value: "True"
[group] -gbt
[param] -classifier.gbt.w <int32> Number "w" of boosting algorithm iterations, with w*K being the total number of trees in the GBT model, where K is the output number of classes.. Mandatory: True. Default Value: "200"
[param] -classifier.gbt.p <float> Portion of the whole training set used for each algorithm iteration. The subset is generated randomly.. Mandatory: True. Default Value: "0.8"
[param] -classifier.gbt.max <int32> The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.. Mandatory: True. Default Value: "3"
[group] -ann
[param] -classifier.ann.t <string> Type of training method for the multilayer perceptron (MLP) neural network.. Mandatory: True. Default Value: "reg"
[param] -classifier.ann.sizes <string> The number of neurons in each intermediate layer (excluding input and output layers).. Mandatory: True. Default Value: ""
[param] -classifier.ann.a <float> Alpha parameter of the activation function (used only with sigmoid and gaussian functions).. Mandatory: True. Default Value: "1"
[param] -classifier.ann.b <float> Beta parameter of the activation function (used only with sigmoid and gaussian functions).. Mandatory: True. Default Value: "1"
[param] -classifier.ann.bpdw <float> Strength of the weight gradient term in the BACKPROP method. The recommended value is about 0.1.. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.bpms <float> Strength of the momentum term (the difference between weights on the 2 previous iterations). This parameter provides some inertia to smooth the random fluctuations of the weights. It can vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough.. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.rdw <float> Initial value Delta_0 of update-values Delta_{ij} in RPROP method (default = 0.1).. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.rdwm <float> Update-values lower limit Delta_{min} in RPROP method. It must be positive (default = 1e-7).. Mandatory: True. Default Value: "1e-07"
[param] -classifier.ann.eps <float> Epsilon value used in the Termination criteria.. Mandatory: True. Default Value: "0.01"
[param] -classifier.ann.iter <int32> Maximum number of iterations used in the Termination criteria.. Mandatory: True. Default Value: "1000"
[group] -bayes
[group] -rf
[param] -classifier.rf.max <int32> The depth of the tree. A low value will likely underfit and conversely a high value will likely overfit. The optimal value can be obtained using cross validation or other suitable methods.. Mandatory: True. Default Value: "5"
[param] -classifier.rf.min <int32> If the number of samples in a node is smaller than this parameter, then the node will not be split. A reasonable value is a small percentage of the total data e.g. 1 percent.. Mandatory: True. Default Value: "10"
[param] -classifier.rf.ra <float> If all absolute differences between an estimated value in a node and the values of the train samples in this node are smaller than this regression accuracy parameter, then the node will not be split.. Mandatory: True. Default Value: "0"
[param] -classifier.rf.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.. Mandatory: True. Default Value: "10"
[param] -classifier.rf.var <int32> The size of the subset of features, randomly selected at each tree node, that are used to find the best split(s). If you set it to 0, then the size will be set to the square root of the total number of features.. Mandatory: True. Default Value: "0"
[param] -classifier.rf.nbtrees <int32> The maximum number of trees in the forest. Typically, the more trees you have, the better the accuracy. However, the improvement in accuracy generally diminishes and reaches an asymptote for a certain number of trees. Also to keep in mind, increasing the number of trees increases the prediction time linearly.. Mandatory: True. Default Value: "100"
[param] -classifier.knn.k <int32> The number of neighbors to use.. Mandatory: True. Default Value: "32"
Limitations
None
Authors
OTB-Team
See Also
OpenCV documentation for machine learning http://docs.opencv.org/modules/ml/doc/ml.html
Example of use
io.il: QB_1_ortho.tif
io.vd: VectorData_QB1.shp
io.imstat: EstimateImageStatisticsQB1.xml
sample.mv: 100
sample.mt: 100
sample.vtr: 0.5
sample.edg: false
sample.vfn: Class
classifier: libsvm
classifier.libsvm.k: linear
classifier.libsvm.c: 1
classifier.libsvm.opt: false
io.out: svmModelQB1.txt
io.confmatout: svmConfusionMatrixQB1.csv
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/TrainImagesClassifier-svm.html b/python/plugins/processing/otb/description/doc/TrainImagesClassifier-svm.html
new file mode 100644
index 000000000000..f50791258f5d
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/TrainImagesClassifier-svm.html
@@ -0,0 +1,11 @@
+
+
+
TrainImagesClassifier
Brief Description
Train a classifier from multiple pairs of images and training vector data.
Tags
Learning
Long Description
This application performs a classifier training from multiple pairs of input images and training vector data. Samples are composed of pixel values in each band optionally centered and reduced using an XML statistics file produced by the ComputeImagesStatistics application.
+ The training vector data must contain polygons with a positive integer field representing the class label. The name of this field can be set using the "Class label field" parameter. Training and validation sample lists are built such that each class is equally represented in both lists. One parameter allows to control the ratio between the number of samples in training and validation sets. Two parameters allow to manage the size of the training and validation sets per class and per image.
+ Several classifier parameters can be set depending on the chosen classifier. In the validation process, the confusion matrix is organized the following way: rows = reference labels, columns = produced labels. In the header of the optional confusion matrix output file, the validation (reference) and predicted (produced) class labels are ordered according to the rows/columns of the confusion matrix.
+ This application is based on LibSVM and on OpenCV Machine Learning classifiers, and is compatible with OpenCV 2.3.1 and later.
Parameters
[param] -io <string> This group of parameters allows to set input and output data.. Mandatory: True. Default Value: "0"
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application.. Mandatory: True. Default Value: "0"
[param] -sample <string> This group of parameters allows to set training and validation sample lists parameters.. Mandatory: True. Default Value: "0"
[param] -rand <int32> Set a specific seed with integer value.. Mandatory: False. Default Value: "0"
[choice] -classifier Choice of the classifier to use for the training. libsvm,svm,boost,dt,gbt,ann,bayes,rf,knn. Mandatory: True. Default Value: "libsvm"
[param] -classifier.libsvm.c <float> SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.. Mandatory: True. Default Value: "1"
[param] -classifier.svm.c <float> SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.. Mandatory: True. Default Value: "1"
[param] -classifier.svm.nu <float> Parameter nu of a SVM optimization problem.. Mandatory: True. Default Value: "0"
[param] -classifier.svm.coef0 <float> Parameter coef0 of a kernel function (POLY / SIGMOID).. Mandatory: True. Default Value: "0"
[param] -classifier.svm.gamma <float> Parameter gamma of a kernel function (POLY / RBF / SIGMOID).. Mandatory: True. Default Value: "1"
[param] -classifier.svm.degree <float> Parameter degree of a kernel function (POLY).. Mandatory: True. Default Value: "1"
[param] -classifier.svm.opt <boolean> SVM parameters optimization flag.
+-If set to True, then the optimal SVM parameters will be estimated. Parameters are considered optimal by OpenCV when the cross-validation estimate of the test set error is minimal. Finally, the SVM training process is computed 10 times with these optimal parameters over subsets corresponding to 1/10th of the training samples using the k-fold cross-validation (with k = 10).
+-If set to False, the SVM classification process will be computed once with the currently set input SVM parameters over the training samples.
+-Thus, even with identical input SVM parameters and a similar random seed, the output SVM models will be different according to the method used (optimized or not) because the samples are not identically processed within OpenCV.. Mandatory: False. Default Value: "True"
[group] -boost
[param] -classifier.boost.t <string> Type of Boosting algorithm.. Mandatory: True. Default Value: "real"
[param] -classifier.boost.w <int32> The number of weak classifiers.. Mandatory: True. Default Value: "100"
[param] -classifier.boost.r <float> A threshold between 0 and 1 used to save computational time. Samples with summary weight <= (1 - weight_trim_rate) do not participate in the next iteration of training. Set this parameter to 0 to turn off this functionality.. Mandatory: True. Default Value: "0.95"
[param] -classifier.boost.m <int32> Maximum depth of the tree.. Mandatory: True. Default Value: "1"
[group] -dt
[param] -classifier.dt.max <int32> The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.. Mandatory: True. Default Value: "65535"
[param] -classifier.dt.min <int32> If the number of samples in a node is smaller than this parameter, then the node will not be split. A reasonable value is a small percentage of the total data e.g. 1 percent.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.f <int32> If cv_folds > 1, then it prunes a tree with K-fold cross-validation where K is equal to cv_folds.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.r <boolean> If true, then a pruning will be harsher. This will make a tree more compact and more resistant to the training data noise but a bit less accurate.. Mandatory: False. Default Value: "True"
[param] -classifier.dt.t <boolean> If true, then pruned branches are physically removed from the tree.. Mandatory: False. Default Value: "True"
[group] -gbt
[param] -classifier.gbt.w <int32> Number "w" of boosting algorithm iterations, with w*K being the total number of trees in the GBT model, where K is the output number of classes.. Mandatory: True. Default Value: "200"
[param] -classifier.gbt.p <float> Portion of the whole training set used for each algorithm iteration. The subset is generated randomly.. Mandatory: True. Default Value: "0.8"
[param] -classifier.gbt.max <int32> The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.. Mandatory: True. Default Value: "3"
[group] -ann
[param] -classifier.ann.t <string> Type of training method for the multilayer perceptron (MLP) neural network.. Mandatory: True. Default Value: "reg"
[param] -classifier.ann.sizes <string> The number of neurons in each intermediate layer (excluding input and output layers).. Mandatory: True. Default Value: ""
[param] -classifier.ann.a <float> Alpha parameter of the activation function (used only with sigmoid and gaussian functions).. Mandatory: True. Default Value: "1"
[param] -classifier.ann.b <float> Beta parameter of the activation function (used only with sigmoid and gaussian functions).. Mandatory: True. Default Value: "1"
[param] -classifier.ann.bpdw <float> Strength of the weight gradient term in the BACKPROP method. The recommended value is about 0.1.. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.bpms <float> Strength of the momentum term (the difference between weights on the 2 previous iterations). This parameter provides some inertia to smooth the random fluctuations of the weights. It can vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough.. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.rdw <float> Initial value Delta_0 of update-values Delta_{ij} in RPROP method (default = 0.1).. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.rdwm <float> Update-values lower limit Delta_{min} in RPROP method. It must be positive (default = 1e-7).. Mandatory: True. Default Value: "1e-07"
[param] -classifier.ann.eps <float> Epsilon value used in the Termination criteria.. Mandatory: True. Default Value: "0.01"
[param] -classifier.ann.iter <int32> Maximum number of iterations used in the Termination criteria.. Mandatory: True. Default Value: "1000"
[group] -bayes
[group] -rf
[param] -classifier.rf.max <int32> The depth of the tree. A low value will likely underfit and conversely a high value will likely overfit. The optimal value can be obtained using cross validation or other suitable methods.. Mandatory: True. Default Value: "5"
[param] -classifier.rf.min <int32> If the number of samples in a node is smaller than this parameter, then the node will not be split. A reasonable value is a small percentage of the total data e.g. 1 percent.. Mandatory: True. Default Value: "10"
[param] -classifier.rf.ra <float> If all absolute differences between an estimated value in a node and the values of the train samples in this node are smaller than this regression accuracy parameter, then the node will not be split.. Mandatory: True. Default Value: "0"
[param] -classifier.rf.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.. Mandatory: True. Default Value: "10"
[param] -classifier.rf.var <int32> The size of the subset of features, randomly selected at each tree node, that are used to find the best split(s). If you set it to 0, then the size will be set to the square root of the total number of features.. Mandatory: True. Default Value: "0"
[param] -classifier.rf.nbtrees <int32> The maximum number of trees in the forest. Typically, the more trees you have, the better the accuracy. However, the improvement in accuracy generally diminishes and reaches an asymptote for a certain number of trees. Also to keep in mind, increasing the number of trees increases the prediction time linearly.. Mandatory: True. Default Value: "100"
[param] -classifier.knn.k <int32> The number of neighbors to use.. Mandatory: True. Default Value: "32"
Limitations
None
Authors
OTB-Team
See Also
OpenCV documentation for machine learning http://docs.opencv.org/modules/ml/doc/ml.html
Example of use
io.il: QB_1_ortho.tif
io.vd: VectorData_QB1.shp
io.imstat: EstimateImageStatisticsQB1.xml
sample.mv: 100
sample.mt: 100
sample.vtr: 0.5
sample.edg: false
sample.vfn: Class
classifier: libsvm
classifier.libsvm.k: linear
classifier.libsvm.c: 1
classifier.libsvm.opt: false
io.out: svmModelQB1.txt
io.confmatout: svmConfusionMatrixQB1.csv
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/TrainImagesClassifier.html b/python/plugins/processing/otb/description/doc/TrainImagesClassifier.html
new file mode 100644
index 000000000000..f50791258f5d
--- /dev/null
+++ b/python/plugins/processing/otb/description/doc/TrainImagesClassifier.html
@@ -0,0 +1,11 @@
+
+
+
TrainImagesClassifier
Brief Description
Train a classifier from multiple pairs of images and training vector data.
Tags
Learning
Long Description
This application performs a classifier training from multiple pairs of input images and training vector data. Samples are composed of pixel values in each band optionally centered and reduced using an XML statistics file produced by the ComputeImagesStatistics application.
+ The training vector data must contain polygons with a positive integer field representing the class label. The name of this field can be set using the "Class label field" parameter. Training and validation sample lists are built such that each class is equally represented in both lists. One parameter allows to control the ratio between the number of samples in training and validation sets. Two parameters allow to manage the size of the training and validation sets per class and per image.
+ Several classifier parameters can be set depending on the chosen classifier. In the validation process, the confusion matrix is organized the following way: rows = reference labels, columns = produced labels. In the header of the optional confusion matrix output file, the validation (reference) and predicted (produced) class labels are ordered according to the rows/columns of the confusion matrix.
+ This application is based on LibSVM and on OpenCV Machine Learning classifiers, and is compatible with OpenCV 2.3.1 and later.
Parameters
[param] -io <string> This group of parameters allows to set input and output data.. Mandatory: True. Default Value: "0"
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
[param] -sample <string> This group of parameters allows to set training and validation sample lists parameters.. Mandatory: True. Default Value: "0"
[param] -rand <int32> Set specific seed. with integer value.. Mandatory: False. Default Value: "0"
[choice] -classifier Choice of the classifier to use for the training. libsvm,svm,boost,dt,gbt,ann,bayes,rf,knn. Mandatory: True. Default Value: "libsvm"
[param] -classifier.libsvm.c <float> SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.. Mandatory: True. Default Value: "1"
[param] -classifier.svm.c <float> SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.. Mandatory: True. Default Value: "1"
[param] -classifier.svm.nu <float> Parameter nu of a SVM optimization problem.. Mandatory: True. Default Value: "0"
[param] -classifier.svm.coef0 <float> Parameter coef0 of a kernel function (POLY / SIGMOID).. Mandatory: True. Default Value: "0"
[param] -classifier.svm.gamma <float> Parameter gamma of a kernel function (POLY / RBF / SIGMOID).. Mandatory: True. Default Value: "1"
[param] -classifier.svm.degree <float> Parameter degree of a kernel function (POLY).. Mandatory: True. Default Value: "1"
[param] -classifier.svm.opt <boolean> SVM parameters optimization flag.
+-If set to True, then the optimal SVM parameters will be estimated. Parameters are considered optimal by OpenCV when the cross-validation estimate of the test set error is minimal. Finally, the SVM training process is computed 10 times with these optimal parameters over subsets corresponding to 1/10th of the training samples using the k-fold cross-validation (with k = 10).
+-If set to False, the SVM classification process will be computed once with the currently set input SVM parameters over the training samples.
+-Thus, even with identical input SVM parameters and a similar random seed, the output SVM models will be different according to the method used (optimized or not) because the samples are not identically processed within OpenCV.. Mandatory: False. Default Value: "True"
[group] -boost
[param] -classifier.boost.t <string> Type of Boosting algorithm.. Mandatory: True. Default Value: "real"
[param] -classifier.boost.w <int32> The number of weak classifiers.. Mandatory: True. Default Value: "100"
[param] -classifier.boost.r <float> A threshold between 0 and 1 used to save computational time. Samples with summary weight <= (1 - weight_trim_rate) do not participate in the next iteration of training. Set this parameter to 0 to turn off this functionality.. Mandatory: True. Default Value: "0.95"
[param] -classifier.boost.m <int32> Maximum depth of the tree.. Mandatory: True. Default Value: "1"
[group] -dt
[param] -classifier.dt.max <int32> The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.. Mandatory: True. Default Value: "65535"
[param] -classifier.dt.min <int32> If all absolute differences between an estimated value in a node and the values of the train samples in this node are smaller than this regression accuracy parameter, then the node will not be split.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.f <int32> If cv_folds > 1, then it prunes a tree with K-fold cross-validation where K is equal to cv_folds.. Mandatory: True. Default Value: "10"
[param] -classifier.dt.r <boolean> If true, then a pruning will be harsher. This will make a tree more compact and more resistant to the training data noise but a bit less accurate.. Mandatory: False. Default Value: "True"
[param] -classifier.dt.t <boolean> If true, then pruned branches are physically removed from the tree.. Mandatory: False. Default Value: "True"
[group] -gbt
[param] -classifier.gbt.w <int32> Number "w" of boosting algorithm iterations, with w*K being the total number of trees in the GBT model, where K is the output number of classes.. Mandatory: True. Default Value: "200"
[param] -classifier.gbt.p <float> Portion of the whole training set used for each algorithm iteration. The subset is generated randomly.. Mandatory: True. Default Value: "0.8"
[param] -classifier.gbt.max <int32> The training algorithm attempts to split each node while its depth is smaller than the maximum possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or if the tree is pruned.. Mandatory: True. Default Value: "3"
[group] -ann
[param] -classifier.ann.t <string> Type of training method for the multilayer perceptron (MLP) neural network.. Mandatory: True. Default Value: "reg"
[param] -classifier.ann.sizes <string> The number of neurons in each intermediate layer (excluding input and output layers).. Mandatory: True. Default Value: ""
[param] -classifier.ann.a <float> Alpha parameter of the activation function (used only with sigmoid and gaussian functions).. Mandatory: True. Default Value: "1"
[param] -classifier.ann.b <float> Beta parameter of the activation function (used only with sigmoid and gaussian functions).. Mandatory: True. Default Value: "1"
[param] -classifier.ann.bpdw <float> Strength of the weight gradient term in the BACKPROP method. The recommended value is about 0.1.. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.bpms <float> Strength of the momentum term (the difference between weights on the 2 previous iterations). This parameter provides some inertia to smooth the random fluctuations of the weights. It can vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough.. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.rdw <float> Initial value Delta_0 of update-values Delta_{ij} in RPROP method (default = 0.1).. Mandatory: True. Default Value: "0.1"
[param] -classifier.ann.rdwm <float> Update-values lower limit Delta_{min} in RPROP method. It must be positive (default = 1e-7).. Mandatory: True. Default Value: "1e-07"
[param] -classifier.ann.eps <float> Epsilon value used in the Termination criteria.. Mandatory: True. Default Value: "0.01"
[param] -classifier.ann.iter <int32> Maximum number of iterations used in the Termination criteria.. Mandatory: True. Default Value: "1000"
[group] -bayes
[group] -rf
[param] -classifier.rf.max <int32> The depth of the tree. A low value will likely underfit and conversely a high value will likely overfit. The optimal value can be obtained using cross validation or other suitable methods.. Mandatory: True. Default Value: "5"
[param] -classifier.rf.min <int32> If the number of samples in a node is smaller than this parameter, then the node will not be split. A reasonable value is a small percentage of the total data e.g. 1 percent.. Mandatory: True. Default Value: "10"
[param] -classifier.rf.ra <float> If all absolute differences between an estimated value in a node and the values of the train samples in this node are smaller than this regression accuracy parameter, then the node will not be split.. Mandatory: True. Default Value: "0"
[param] -classifier.rf.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.. Mandatory: True. Default Value: "10"
[param] -classifier.rf.var <int32> The size of the subset of features, randomly selected at each tree node, that are used to find the best split(s). If you set it to 0, then the size will be set to the square root of the total number of features.. Mandatory: True. Default Value: "0"
[param] -classifier.rf.nbtrees <int32> The maximum number of trees in the forest. Typically, the more trees you have, the better the accuracy. However, the improvement in accuracy generally diminishes and reaches an asymptote for a certain number of trees. Also to keep in mind, increasing the number of trees increases the prediction time linearly.. Mandatory: True. Default Value: "100"
[param] -classifier.knn.k <int32> The number of neighbors to use.. Mandatory: True. Default Value: "32"
Limitations
None
Authors
OTB-Team
See Also
OpenCV documentation for machine learning http://docs.opencv.org/modules/ml/doc/ml.html
Example of use
io.il: QB_1_ortho.tif
io.vd: VectorData_QB1.shp
io.imstat: EstimateImageStatisticsQB1.xml
sample.mv: 100
sample.mt: 100
sample.vtr: 0.5
sample.edg: false
sample.vfn: Class
classifier: libsvm
classifier.libsvm.k: linear
classifier.libsvm.c: 1
classifier.libsvm.opt: false
io.out: svmModelQB1.txt
io.confmatout: svmConfusionMatrixQB1.csv
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/VectorDataDSValidation.html b/python/plugins/processing/otb/description/doc/VectorDataDSValidation.html
index 824883eb4e7a..dcc02ceb53fc 100644
--- a/python/plugins/processing/otb/description/doc/VectorDataDSValidation.html
+++ b/python/plugins/processing/otb/description/doc/VectorDataDSValidation.html
@@ -1 +1,5 @@
-
Vector Data validation Application
Brief Description
Vector data validation based on the fusion of features using Dempster-Shafer evidence theory framework.
Tags
Feature Extraction
Long Description
Vector data validation based on the fusion of features using Dempster-Shafer evidence theory framework.
Parameters
[param] Input Vector Data (-in): Input vector data for validation
[param] Descriptors model filename (-descmod): Fuzzy descriptors model (xml file)
[param] Belief Support (-belsup): Dempster Shafer study hypothesis to compute belief
[param] Plausibility Support (-plasup): Dempster Shafer study hypothesis to compute plausibility
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/VectorDataExtractROIApplication.html b/python/plugins/processing/otb/description/doc/VectorDataExtractROIApplication.html
index 01f64e6d27cb..74831ebd0499 100644
--- a/python/plugins/processing/otb/description/doc/VectorDataExtractROIApplication.html
+++ b/python/plugins/processing/otb/description/doc/VectorDataExtractROIApplication.html
@@ -1 +1,5 @@
-
VectorData Extract ROI Application
Brief Description
Perform an extract ROI on the input vector data according to the input image extent
Tags
Vector data Manipulation
Long Description
This application extracts the VectorData features belonging to a region specified by the support image envelope
Parameters
[group] Input and output data (-io):
[param] Input Vector data (-vd):
[param] Support image (-in):
[param] Output Vector data (-out):
[group] Elevation management (-elev): This group of parameters allows to manage elevation values in the VectorData projection process
[param] DEM directory (-dem): This parameter allows to select a directory containing Digital Elevation Model tiles. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application
Perform an extract ROI on the input vector data according to the input image extent
Tags
Vector Data Manipulation
Long Description
This application extracts the vector data features belonging to a region specified by the support image envelope
Parameters
[param] -io <string> Group containing input and output parameters. Mandatory: True. Default Value: "0"
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
This application allows to reproject a vector data using support image projection reference, or a user
-specified map projection
-
Detailed description
-
This application allows to reproject a vector data using support image projection reference, or a user given
-map projection. If given, image keywordlist can be added to reprojected vectordata.
-
Parameters
-
This section describes in details the parameters available for this application. Table 4.13, page 305
-presents a summary of these parameters and the parameters keys to be used in command-line and
-programming languages. Application key is VectorDataReprojection.
-
-
-
-
-
-
-
-
Parameter key
Parameter type
Parameter description
-
in
Group
Input data
-
in.vd
Input File name
Input vector data
-
in.kwl
Input image
Use image keywords list
-
out
Group
Output data
-
out.vd
Output File name
Output vector data
-
out.proj
Choices
Output Projection choice
-
out.proj image
Choice
Use image projection ref
-
out.proj user
Choice
User defined projection
-
out.proj.image.in
Input image
Image used to get projection map
-
out.proj.user.map
Choices
Output Cartographic Map Projection
-
out.proj.user.map utm
Choice
Universal Trans-Mercator (UTM)
-
out.proj.user.map lambert2
Choice
Lambert II Etendu
-
out.proj.user.map lambert93
Choice
Lambert93
-
out.proj.user.map wgs
Choice
WGS 84
-
out.proj.user.map epsg
Choice
EPSG Code
-
out.proj.user.map.utm.zone
Int
Zone number
-
out.proj.user.map.utm.northhem
Boolean
Northern Hemisphere
-
out.proj.user.map.epsg.code
Int
EPSG Code
-
-
-
Table 4.13: Parameters table for Vector Data reprojection.
-
-
-
-
-
Input data
-
-
Input vector data: The input vector data to reproject
-
-
Use image keywords list: Optional input image to fill vector data with image kwl.
-
-
Output data
-
-
Output vector data: The reprojected vector data
-
-
Output Projection choice:
-
Available choices are:
-
-
Use image projection ref: Vector data will be reprojected in image projection ref.
-
-
Image used to get projection map: Projection map will be found using image
- metadata
-
-
User defined projection
-
-
Output Cartographic Map Projection: Parameters of the output map projection
- to be used.
-
-
Zone number: The zone number ranges from 1 to 60 and allows to define the
- transverse mercator projection (along with the hemisphere)
-
-
Northern Hemisphere: The transverse mercator projections are defined by their
- zone number as well as the hemisphere. Activate this parameter if your image is in
- the northern hemisphere.
-
-
-
-
EPSG Code: See www.spatialreference.org to find which EPSG code is associated
- to your projection
-
-
-
-
Example
-
To run this example in command-line, use the following:
-
This application allows to reproject a vector data using support image projection reference, or a user specified map projection
+
Tags
Geometry,Vector Data Manipulation,Coordinates
Long Description
This application allows to reproject a vector data using support image projection reference, or a user given map projection.
+ If given, image keywordlist can be added to reprojected vectordata.
[param] -elev <string> This group of parameters allows to manage elevation values. Supported formats are SRTM, DTED or any geotiff processed by the DEM import application. Mandatory: True. Default Value: "0"
Limitations
Authors
OTB-Team
See Also
Example of use
in.vd: VectorData_QB1.shp
out.proj: image
out.proj.image.in: ROI_QB_MUL_1.tif
out.vd: reprojected_vd.shp
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/VectorDataSetField.html b/python/plugins/processing/otb/description/doc/VectorDataSetField.html
index 096a7385056b..f28354e410dd 100644
--- a/python/plugins/processing/otb/description/doc/VectorDataSetField.html
+++ b/python/plugins/processing/otb/description/doc/VectorDataSetField.html
@@ -1 +1,5 @@
-
Vector data set field Application
Brief Description
Set a field in vector data.
Tags
Vector data Manipulation
Long Description
Set a specified field to a specified value on all features of a vector data (Note: doesn't work with KML files yet)
[param] -fn <string> Field name. Mandatory: True. Default Value: ""
[param] -fv <string> Field value. Mandatory: True. Default Value: ""
Limitations
Doesn't work with KML files yet
Authors
OTB-Team
See Also
Example of use
in: qb_RoadExtract_classification.shp
out: VectorDataSetField.shp
fn: Info
fv: Sample polygon
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/VectorDataTransform.html b/python/plugins/processing/otb/description/doc/VectorDataTransform.html
index 1a51014f7da8..2b64b0b78aa0 100644
--- a/python/plugins/processing/otb/description/doc/VectorDataTransform.html
+++ b/python/plugins/processing/otb/description/doc/VectorDataTransform.html
@@ -1 +1,5 @@
-
Vector Data Transformation Application
Brief Description
Apply a transform to each vertex of the input VectorData
Tags
Vector data Manipulation
Long Description
This application performs a transformation to an input vector data transforming each vertex that composed the vector data. The applied transformation manages translation, rotation and scale, and can be centered or not.
Parameters
[param] Input Vector data (-vd):
[param] Output Vector data (-out):
[param] Support image (-in): Image needed as a support of the VectorData
[group] Transform parameters (-transform):
[param] Translation X (-tx): Translation in the X direction
[param] Translation Y (-ty): Translation in the Y direction
[param] Rotation Angle (-ro): Angle of the rotation to apply in degrees
[param] Center X (-centerx): The first coordinate of the rotation center
[param] Center Y (-centery): The second coordinate of the rotation center
Apply a transform to each vertex of the input VectorData
Tags
Vector Data Manipulation
Long Description
This application performs a transformation of an input vector data transforming each vertex in the vector data. The applied transformation manages translation, rotation and scale, and can be centered or not.
Parameters
[param] -vd <string> Input vector data to transform. Mandatory: True. Default Value: ""
[param] -in <string> Image needed as a support to the vector data. Mandatory: True. Default Value: ""
[param] -transform <string> Group of parameters to define the transform. Mandatory: True. Default Value: "0"
Limitations
None
Authors
OTB-Team
See Also
Example of use
vd: qb_RoadExtract_easyClassification.shp
in: qb_RoadExtract.tif
out: VectorDataTransform.shp
transform.ro: 5
\ No newline at end of file
diff --git a/python/plugins/processing/otb/description/doc/VertexComponentAnalysis.html b/python/plugins/processing/otb/description/doc/VertexComponentAnalysis.html
index 681594c69e0c..14422d1b8425 100644
--- a/python/plugins/processing/otb/description/doc/VertexComponentAnalysis.html
+++ b/python/plugins/processing/otb/description/doc/VertexComponentAnalysis.html
@@ -1 +1,5 @@
-
Vertex Component Analysis
Brief Description
Find endmembers in hyperspectral images with Vertex Component Analysis
Tags
Hyperspectral, Dimensionality Reduction
Long Description
Applies the Vertex Component Analysis to an hyperspectral image to extract endmembers
Parameters
[param] Input Image (-in): Input hyperspectral data cube
[param] Number of endmembers (-ne): The number of endmembers to extract from the data cube
[param] Output Endmembers (-outendm): The endmembers, stored in a one-line multi-spectral image, each pixel representing an endmember
[param] -ne <int32> The number of endmembers to extract from the data cube. Mandatory: True. Default Value: "1"
[param] -outendm <string> The endmembers, stored in a one-line multi-spectral image, each pixel representing an endmember. Mandatory: True. Default Value: ""
[param] -rand <int32> Set specific seed. with integer value.. Mandatory: False. Default Value: "0"
Limitations
None
Authors
OTB-Team
See Also
Example of use
in: cupriteSubHsi.tif
ne: 5
outendm: VertexComponentAnalysis.tif double
\ No newline at end of file
diff --git a/python/plugins/processing/otb/maintenance/OTBHelper.py b/python/plugins/processing/otb/maintenance/OTBHelper.py
new file mode 100644
index 000000000000..4354f71f6d8f
--- /dev/null
+++ b/python/plugins/processing/otb/maintenance/OTBHelper.py
@@ -0,0 +1,618 @@
+# -*- coding: utf-8 -*-
+
+"""
+***************************************************************************
+ OTBHelper.py
+ ---------------------
+ Copyright : (C) 2013 by CS Systemes d'information (CS SI)
+ Email : otb at c-s dot fr (CS SI)
+ Contributors : Julien Malik (CS SI) - File creation
+ Oscar Picas (CS SI) -
+ Alexia Mondot (CS SI) - Add particular case in xml creation
+***************************************************************************
+* *
+* This program is free software; you can redistribute it and/or modify *
+* it under the terms of the GNU General Public License as published by *
+* the Free Software Foundation; either version 2 of the License, or *
+* (at your option) any later version. *
+* *
+***************************************************************************
+"""
+__author__ = 'Julien Malik, Oscar Picas, Alexia Mondot'
+__copyright__ = '(C) 2013, CS Systemes d\'information (CS SI)'
+# This will get replaced with a git SHA1 when you do a git archive
+__revision__ = '$Format:%H$'
+__version__ = "3.8"
+
+import os
+import copy
+
+try:
+ import processing
+except ImportError, e:
+ raise Exception("Processing must be installed and available in PYTHONPATH")
+
+try:
+ import otbApplication
+except ImportError, e:
+ raise Exception("OTB python plugins must be installed and available in PYTHONPATH")
+
+import xml.etree.ElementTree as ET
+import traceback
+
+import processing.otb.OTBSpecific_XMLcreation as OTBSpecific_XMLcreation
+from contextlib import contextmanager
+import shutil
+
+@contextmanager
+def tag(name, c):
+ c.append("<%s>" % name)
+ yield
+    if ' ' in name:
+        c.append("</%s>" % name.split(' ')[0])
+    else:
+        c.append("</%s>" % name)
+
+@contextmanager
+def opentag(name, c):
+ c.append("<%s>" % name)
+ yield
+
+def get_group( appInstance ) :
+ tags = appInstance.GetDocTags()
+ sectionTags = ["Image Manipulation","Vector Data Manipulation", "Calibration","Geometry", "Image Filtering","Feature Extraction","Stereo","Learning","Segmentation"]
+ for sectionTag in sectionTags:
+ for tag in tags:
+ if tag == sectionTag:
+ return sectionTag
+ return "Miscellaneous"
+
+def set_OTB_log():
+ import logging
+ logger = logging.getLogger('OTBGenerator')
+ hdlr = logging.FileHandler('OTBGenerator.log')
+ hdlr.setLevel(logging.DEBUG)
+ cons = logging.StreamHandler()
+ cons.setLevel(logging.CRITICAL)
+ formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
+ hdlr.setFormatter(formatter)
+ logger.addHandler(hdlr)
+ logger.addHandler(cons)
+ logger.setLevel(logging.DEBUG)
+
+def get_OTB_log():
+ import logging
+ logger = logging.getLogger('OTBGenerator')
+ if not logger.handlers:
+ set_OTB_log()
+ logger = logging.getLogger('OTBGenerator')
+ return logger
+
+def indent(elem, level = 0):
+ i = "\n" + level*" "
+ if len(elem):
+ if not elem.text or not elem.text.strip():
+ elem.text = i + " "
+ if not elem.tail or not elem.tail.strip():
+ elem.tail = i
+ for elem in elem:
+ indent(elem, level + 1)
+ if not elem.tail or not elem.tail.strip():
+ elem.tail = i
+ else:
+ if level and (not elem.tail or not elem.tail.strip()):
+ elem.tail = i
+
+set_OTB_log()
+
+def get_parameters():
+ parameters = { getattr(otbApplication, each): each for each in dir(otbApplication) if 'ParameterType_' in each}
+ return parameters
+
+def get_inverted_parameters():
+ """
+ This function allows to map otb parameters with processing parameters.
+ """
+ parameters = { getattr(otbApplication, each): each for each in dir(otbApplication) if 'ParameterType_' in each}
+ parameters_clone = copy.deepcopy(parameters)
+
+ inverted_parameters = { key: value for value, key in parameters.items() }
+ inverted_parameters['ParameterType_Radius'] = 1
+ inverted_parameters['ParameterType_RAM'] = 1
+ inverted_parameters['ParameterType_ComplexInputImage'] = 9
+ inverted_parameters['ParameterType_ComplexOutputImage'] = 13
+
+ inverted_parameters_clone = copy.deepcopy(inverted_parameters)
+ inverted_parameters_clone['ParameterType_Empty'] = 'ParameterBoolean'
+ inverted_parameters_clone['ParameterType_Int'] = 'ParameterNumber'
+ inverted_parameters_clone['ParameterType_Float'] = 'ParameterNumber'
+ inverted_parameters_clone['ParameterType_String'] = 'ParameterString'
+ inverted_parameters_clone['ParameterType_StringList'] = 'ParameterString'
+ inverted_parameters_clone['ParameterType_InputFilename'] = 'ParameterFile'
+ inverted_parameters_clone['ParameterType_OutputFilename'] = 'OutputFile'
+ inverted_parameters_clone['ParameterType_Directory'] = 'ParameterFile'
+ inverted_parameters_clone['ParameterType_Choice'] = 'ParameterSelection'
+ inverted_parameters_clone['ParameterType_InputImage'] = 'ParameterRaster'
+ inverted_parameters_clone['ParameterType_InputImageList'] = 'ParameterMultipleInput'
+ inverted_parameters_clone['ParameterType_InputVectorData'] = 'ParameterVector'
+ inverted_parameters_clone['ParameterType_InputVectorDataList'] = 'ParameterMultipleInput'
+ inverted_parameters_clone['ParameterType_OutputImage'] = 'OutputRaster'
+ inverted_parameters_clone['ParameterType_OutputVectorData'] = 'OutputVector'
+ inverted_parameters_clone['ParameterType_Radius'] = 'ParameterNumber'
+ inverted_parameters_clone['ParameterType_Group'] = None
+ inverted_parameters_clone['ParameterType_ListView'] = 'ParameterSelection'
+ inverted_parameters_clone['ParameterType_ComplexInputImage'] = 'ParameterRaster'
+ inverted_parameters_clone['ParameterType_ComplexOutputImage'] = 'OutputRaster'
+ inverted_parameters_clone['ParameterType_RAM'] = 'ParameterNumber'
+ inverted_parameters_clone['ParameterType_InputProcessXML'] = 'ParameterFile'
+ inverted_parameters_clone['ParameterType_OutputProcessXML'] = 'ParameterFile'
+ inverted_parameters_clone['ParameterType_InputFilenameList'] = 'ParameterMultipleExternalInput' # 'ParameterString'
+
+ return inverted_parameters_clone
+
+def retrieve_module_name(param):
+ """
+ returns the file parameter of the given processing parameter
+ """
+ import processing
+ if param:
+ try :
+ if 'Parameter' in param:
+ return eval('processing.parameters.%s.__file__' % param).replace('pyc', 'py')
+ if 'Output' in param:
+ return eval('processing.outputs.%s.__file__' % param).replace('pyc', 'py')
+ except TypeError:
+ print "Error parsing ", param
+ return None
+
+def get_constructor_parameters_from_filename(py_file):
+ import ast
+ asto = ast.parse(open(py_file).read())
+ e1 = [each for each in asto.body if type(each) is ast.ClassDef]
+ e2 = [each for each in e1[0].body if hasattr(each, "name") and each.name == "__init__"]
+ if len(e2) > 0:
+ e3 = len(e2[0].args.args)
+ e4 = e2[0].args.args
+ else:
+ e4 = []
+ e5 = [each.id for each in e4]
+ return e5
+
+def get_xml_description_from_application_name(our_app, criteria = None):
+ """
+ creates an xml containing informations about the given our_app
+ """
+ # creates the application to get the description
+ # header
+ app_instance = otbApplication.Registry.CreateApplication(our_app)
+ root = ET.Element('root')
+ app = ET.SubElement(root, 'key')
+ app.text = our_app
+ executable = ET.SubElement(root, 'exec')
+ executable.text = "otbcli_" + our_app
+ longname = ET.SubElement(root, 'longname')
+ longname.text = app_instance.GetDocName()
+ group = ET.SubElement(root, 'group')
+ group.text = get_group(app_instance)
+ desc = ET.SubElement(root, 'description')
+ desc.text = app_instance.GetDescription()
+
+ if not criteria:
+ real_criteria = lambda x: True
+ else:
+ if not callable(criteria):
+ raise Exception("criteria parameter must be a valid python callable")
+
+ real_criteria = criteria
+
+ if len(our_app) == 0:
+ raise Exception("App name is empty !")
+
+ # get parameters
+ param_keys = [param_key for param_key in app_instance.GetParametersKeys()]
+ param_keys = filter(real_criteria, param_keys)
+
+ for param_key in param_keys:
+ if not param_key == "inxml" and not param_key == "outxml" :
+ get_param_descriptor(app.text, app_instance, param_key, root)
+ indent(root)
+ return root
+
+def get_the_choices(app_instance, our_descriptor, root):
+ choices = ET.SubElement(root, 'choices')
+ for choice in app_instance.GetChoiceKeys(our_descriptor):
+ choice_node = ET.SubElement(choices, 'choice')
+ choice_node.text = choice
+
+def get_param_descriptor(appkey, app_instance, our_descriptor, root):
+ """
+ update the root xml with the data of the parameter given by "our_descriptor"
+ """
+ logger = get_OTB_log()
+ parameters = get_parameters()
+ our_type = parameters[app_instance.GetParameterType(our_descriptor)]
+
+ #get the list of mapped parameters (otb/processing)
+ inverted_parameters = get_inverted_parameters()
+
+ mapped_parameter = inverted_parameters[our_type]
+
+ file_parameter = retrieve_module_name(mapped_parameter)
+
+ if not file_parameter:
+ logger.info("Type %s is not handled yet. (%s, %s)" % (our_type, appkey, our_descriptor))
+ return
+
+ param = ET.SubElement(root, 'parameter')
+ attrs = {'source_parameter_type' : parameters[app_instance.GetParameterType(our_descriptor)]}
+ if appkey == "Segmentation" :
+ if parameters[app_instance.GetParameterType(our_descriptor)] == "ParameterType_OutputFilename" :
+ attrs = {'source_parameter_type' : 'ParameterType_OutputVectorData'}
+ if appkey == "LSMSVectorization" :
+ if parameters[app_instance.GetParameterType(our_descriptor)] == "ParameterType_OutputFilename" :
+ attrs = {'source_parameter_type' : 'ParameterType_OutputVectorData'}
+ if appkey == "SplitImage" :
+ if parameters[app_instance.GetParameterType(our_descriptor)] == "ParameterType_OutputImage" :
+ attrs = {'source_parameter_type' : 'ParameterType_OutputFilename'}
+
+ param_type = ET.SubElement(param, 'parameter_type', attrib = attrs)
+
+ param_type.text = inverted_parameters[parameters[app_instance.GetParameterType(our_descriptor)]]
+ if appkey == "Segmentation" :
+ if parameters[app_instance.GetParameterType(our_descriptor)] == "ParameterType_OutputFilename" :
+ param_type.text = "OutputVector"
+ if appkey == "LSMSVectorization" :
+ if parameters[app_instance.GetParameterType(our_descriptor)] == "ParameterType_OutputFilename" :
+ param_type.text = "OutputVector"
+ if appkey == "SplitImage" :
+ if parameters[app_instance.GetParameterType(our_descriptor)] == "ParameterType_OutputImage" :
+ param_type.text = "OutputFile"
+
+ the_params = get_constructor_parameters_from_filename(file_parameter)
+ if len(the_params) == 0:
+ if 'Output' in file_parameter:
+ file_path = os.path.join(os.path.dirname(file_parameter), 'Output.py')
+ the_params = get_constructor_parameters_from_filename(file_path)
+ if 'Parameter' in file_parameter:
+ file_path = os.path.join(os.path.dirname(file_parameter), 'Parameter.py')
+ the_params = (file_path)
+
+ if "self" in the_params:
+ the_params = the_params[1:]
+ else:
+ raise Exception("Unexpected constructor parameters")
+
+ key = ET.SubElement(param, 'key')
+ key.text = our_descriptor
+ is_choice_type = False
+ for each in the_params:
+ if each == "name":
+ name = ET.SubElement(param, 'name')
+ name.text = app_instance.GetParameterName(our_descriptor)
+ elif each == "description":
+ desc = ET.SubElement(param, 'description')
+ desc.text = app_instance.GetParameterDescription(our_descriptor)
+ elif each == "optional":
+ optional = ET.SubElement(param, 'optional')
+ optional.text = str(not app_instance.IsMandatory(our_descriptor))
+ elif each == "default":
+ done = False
+ reason = []
+ try:
+ default_value = str(app_instance.GetParameterAsString(our_descriptor))
+ done = True
+ except:
+ reason.append(traceback.format_exc())
+ if not done:
+ try:
+ default_value = str(app_instance.GetParameterFloat(our_descriptor))
+ done = True
+ except:
+ reason.append(traceback.format_exc())
+ if not done:
+ try:
+ default_value = str(app_instance.GetParameterInt(our_descriptor))
+ done = True
+ except:
+ reason.append(traceback.format_exc())
+
+ if done:
+ default = ET.SubElement(param, 'default')
+ default.text = default_value
+
+ if is_choice_type:
+ the_keys = [a_key for a_key in app_instance.GetChoiceKeys(our_descriptor)]
+ if default_value in the_keys:
+ default.text = str(the_keys.index(default_value))
+ else:
+ default.text = ''
+ else:
+ logger.debug("A parameter transformation failed, trying default values : for %s, %s, type %s!, conversion message: %s" % (appkey, our_descriptor, parameters[app_instance.GetParameterType(our_descriptor)], str(reason)))
+ the_type = parameters[app_instance.GetParameterType(our_descriptor)]
+ if the_type == "ParameterType_Int":
+ default_value = "0"
+ elif the_type == "ParameterType_Float":
+ default_value = "0.0"
+ elif the_type == "ParameterType_Empty":
+ default_value = "True"
+ else:
+ raise Exception("Unable to adapt %s, %s, %s, conversion message: %s" % (appkey, our_descriptor, parameters[app_instance.GetParameterType(our_descriptor)], str(reason)))
+
+ default = ET.SubElement(param, 'default')
+ default.text = default_value
+ else:
+ is_choice_type = 'Selection' in param_type.text
+ node = ET.SubElement(param, each)
+ if is_choice_type:
+ get_the_choices(app_instance, our_descriptor, node)
+
+
def get_default_parameter_value(app_instance, param):
    """Return the default value of *param* as a string.

    Tries the application's own string conversion first; when that raises,
    falls back to a default derived from the parameter type ("0", "0.0"
    or "True"; "0" for any other type).
    """
    parameters = get_parameters()
    try:
        return app_instance.GetParameterAsString(param)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; only conversion failures should be caught here.
        fallback = {"ParameterType_Int": "0",
                    "ParameterType_Float": "0.0",
                    "ParameterType_Empty": "True"}
        return fallback.get(parameters[app_instance.GetParameterType(param)], "0")
+
def escape_html(par):
    """Map an OTB parameter-type name to a pseudo-HTML type tag.

    Unknown types fall through to '<string>'.
    """
    # Order matters: earlier fragments win (e.g. 'Int' before 'Radius').
    for fragment, tag_text in (('Int', '<int32>'),
                               ('Float', '<float>'),
                               ('Empty', '<boolean>'),
                               ('Radius', '<int32>'),
                               ('RAM', '<int32>')):
        if fragment in par:
            return tag_text
    return '<string>'
+
def is_a_parameter(app_instance, param):
    """Return True when *param* is a plain parameter, False for choices
    and for dotted sub-parameter keys.

    HaralickTextureExtraction is special-cased: its 'parameters.*' keys
    count as plain parameters despite the dot.
    """
    if app_instance.GetName() == "HaralickTextureExtraction":
        if param.startswith("parameters."):
            return True
    if '.' in param:
        return False
    # Choice parameters answer GetChoiceKeys(); anything else raises.
    try:
        app_instance.GetChoiceKeys(param)
    except:
        return True
    return False
+
+
def describe_app(app_instance):
    """Render the HTML documentation page for one OTB application instance.

    The page lists name, brief/long descriptions, tags, every plain
    parameter (type, description, mandatory flag, default value), every
    choice parameter with its per-choice sub-parameter groups, then
    limitations, authors, see-also and the usage example.
    Returns the page as a single string.
    """
    parameters = get_parameters()
    result = []
    with tag('html', result):
        with tag('head', result):
            how = """

"""
            result.append(how)
        with tag('body', result):
            with tag('h1', result):
                result.append(app_instance.GetName())
            with tag('h2', result):
                result.append('Brief Description')
            result.append(app_instance.GetDescription())
            with tag('h2', result):
                result.append('Tags')
            result.append(','.join(app_instance.GetDocTags()))
            with tag('h2', result):
                result.append('Long Description')
            result.append(app_instance.GetDocLongDescription())
            with tag('h2', result):
                result.append('Parameters')
            params = app_instance.GetParametersKeys()
            level = 0
            with tag('ul', result):
                for param in params:
                    if is_a_parameter(app_instance, param):
                        with tag('li', result):
                            result.append('%s -%s %s ' % ('[param]', param, escape_html(parameters[app_instance.GetParameterType(param)]) ))
                            result.append('%s. Mandatory: %s. Default Value: "%s"' %(app_instance.GetParameterDescription(param), str(app_instance.IsMandatory(param)), get_default_parameter_value(app_instance, param)))
                # Top-level choice parameters (no dot in the key).
                choices_tags = [each for each in params if (not is_a_parameter(app_instance, each)) and (not '.' in each)]
                for choice in choices_tags:
                    result.append('%s -%s %s %s. Mandatory: %s. Default Value: "%s"' % ('[choice]', choice, app_instance.GetParameterDescription(choice), ','.join(app_instance.GetChoiceKeys(choice)), str(app_instance.IsMandatory(choice)), get_default_parameter_value(app_instance, choice)))
                    choices = app_instance.GetChoiceKeys(choice)

                    with tag('ul', result):
                        for subchoice in choices:
                            with tag('li', result):
                                result.append('%s -%s' % ('[group]', subchoice))
                            with tag('ul', result):
                                param_tags = [each for each in params if '.%s' % subchoice in each]
                                for param_tag in param_tags:
                                    with tag('li', result):
                                        result.append('%s -%s ' % ('[param]', param_tag))
                                        # FIX: the original line's quoting was broken
                                        # ("...Default Value: "%s"") and could not parse;
                                        # the default value is wrapped in literal quotes.
                                        result.append('%s %s. Mandatory: %s. Default Value: "%s"' % (escape_html(parameters[app_instance.GetParameterType(param_tag)]), app_instance.GetParameterDescription(param_tag), str(app_instance.IsMandatory(param_tag)), get_default_parameter_value(app_instance, param_tag)))
            with tag('h2', result):
                result.append('Limitations')
            result.append(app_instance.GetDocLimitations())
            with tag('h2', result):
                result.append('Authors')
            result.append(app_instance.GetDocAuthors())
            with tag('h2', result):
                result.append('See Also')
            result.append(app_instance.GetDocSeeAlso())
            with tag('h2', result):
                result.append('Example of use')
            result.append(app_instance.GetHtmlExample())
    if app_instance.GetName() == "HaralickTextureExtraction":
        # Drop the synthetic 'parameters' group entry emitted for this app.
        index = result.index("[param] -parameters <string> ")
        del result[index +2]
        del result[index +1]
        del result[index]
        del result[index -1]
    return "".join(result)
+
def get_list_from_node(myet, available_app):
    """Flatten each <parameter> element of *myet* into a list.

    Each entry is [source_parameter_type, parameter_type, key, name,
    remaining child values...]; <choice> children become Python lists
    and <default> values are re-read from a live application instance.
    """
    all_params = []
    for parameter in myet.iter('parameter'):
        rebuild = []
        par_type = parameter.find('parameter_type').text
        key = parameter.find('key').text
        name = parameter.find('name').text
        source_par_type = parameter.find('parameter_type').attrib['source_parameter_type']
        rebuild.append(source_par_type)
        rebuild.append(par_type)
        rebuild.append(key)
        rebuild.append(name)
        # The first four children were consumed above; walk the rest in
        # document order, skipping <hidden>.
        for each in parameter[4:]:
            if not each.tag in ["hidden"]:
                if len(each.getchildren()) == 0:
                    if each.tag in ["default"]:
                        # Split applications are named "App-variant"; map back
                        # to the base OTB application name before instantiating.
                        if "-" in available_app:
                            available_app = available_app.split("-")[0]
                        app_instance = otbApplication.Registry.CreateApplication(available_app)
                        rebuild.append(get_default_parameter_value(app_instance, key))
                    else:
                        rebuild.append(each.text)
                else:
                    # Non-leaf child: collect the text of its <choice> nodes.
                    rebuild.append([item.text for item in each.iter('choice')])
        all_params.append(rebuild)
    return all_params
+
def adapt_list_to_string(c_list):
    """Convert a flattened parameter description (see get_list_from_node)
    into a "-key value" command-line fragment.

    Returns "" when the parameter carries no value. Input/output file
    parameters get a test fixture path appended as their value.
    """
    a_list = c_list[1:]
    if a_list[0] in ["ParameterVector", "ParameterMultipleInput"]:
        # Data type hint: 3 = image list, -1 = any.
        if c_list[0] == "ParameterType_InputImageList":
            a_list[3] = 3
        else:
            a_list[3] = -1

    if a_list[0] in ["ParameterRaster", "ParameterFile", "ParameterMultipleInput", "OutputRaster", "OutputFile"]:
        if "Output" in a_list[0]:
            a_list.append("/tmp/processing/output.tif")
        else:
            import os
            a_list.append(os.path.join(os.path.abspath(os.curdir), "helper/QB_Toulouse_Ortho_PAN.tif"))

    a_list[1] = "-%s" % a_list[1]

    def mystr(par):
        # Choice lists are serialized as ";"-joined strings.
        if isinstance(par, list):
            return ";".join(par)
        return str(par)

    if a_list[-1] is None:
        return ""

    # List comprehension instead of subscripting a map() result, which
    # breaks under Python 3; also dropped the unused `import sys` and the
    # no-op ParameterSelection branch.
    b_list = [mystr(item) for item in a_list]
    return " ".join([b_list[1], b_list[-1]])
+
+
def get_automatic_ut_from_xml_description(the_root):
    """Build the otbcli unit-test command line for one XML descriptor.

    Raises (after dumping the XML for debugging) when the descriptor is
    malformed or does not target an otbcli_* executable.
    """
    dom_model = the_root
    try:
        appkey = dom_model.find('key').text
        cliName = dom_model.find('exec').text
        # 'longname' and 'group' are read only to validate their presence:
        # a missing node makes .text raise and aborts the command build.
        name = dom_model.find('longname').text
        group = dom_model.find('group').text

        if not cliName.startswith("otbcli_"):
            raise Exception('Wrong client executable')

        rebu = get_list_from_node(dom_model, appkey)
        the_result = map(adapt_list_to_string, rebu)
        ut_command = cliName + " " + " ".join(the_result)
        return ut_command
    except Exception:
        # Was `except Exception, e` (Python-2-only syntax, `e` unused).
        # Dump the offending descriptor to help debugging, then propagate.
        ET.dump(dom_model)
        raise
+
def list_reader(file_name, version):
    """Return the <app_name> texts listed under the given version id in
    the XML file *file_name*."""
    xml_root = ET.parse(file_name).getroot()
    xpath = "./version[@id='%s']/app_name" % version
    return [node.text for node in xml_root.findall(xpath)]
+
def get_otb_version():
    """Return the targeted OTB version string.

    TODO: detect the installed OTB version instead of hard-coding it
    (e.g. force an error and parse the otb-X.XX.X string).
    """
    return "3.18"
+
def get_white_list():
    """Names of the applications enabled for this OTB version (white_list.xml)."""
    return list_reader("white_list.xml", get_otb_version())
+
def get_black_list():
    """Names of the applications disabled for this OTB version (black_list.xml)."""
    return list_reader("black_list.xml", get_otb_version())
+
def create_xml_descriptors():
    """Generate an XML descriptor and an HTML doc page for every available
    OTB application.

    Descriptors are written to description/, HTML pages to description/doc/
    (split applications reuse the HTML page of their base application).
    Applications with a get<AppName> adaptor in OTBSpecific_XMLcreation are
    processed by it; white-listed apps without an adaptor fall back to the
    raw XML description. Every generated descriptor is smoke-tested by
    building its unit-test command line.
    """
    import os
    if not os.path.exists("description"):
        os.mkdir("description")
    if not os.path.exists("html"):
        os.mkdir("html")

    logger = get_OTB_log()

    white_list = get_white_list()
    black_list = get_black_list()

    # The star-import pulls the get<AppName> adaptors into this function's
    # locals() so they can be looked up by name below.
    from processing.otb.OTBSpecific_XMLcreation import *
    for available_app in otbApplication.Registry.GetAvailableApplications():
        try:
            if 'get%s' % available_app in locals():
                if available_app in white_list and not available_app in black_list:
                    the_root = get_xml_description_from_application_name(available_app)
                    the_list = locals()['get%s' % available_app](available_app, the_root)
                    if the_list:
                        for each_dom in the_list:
                            try:
                                ut_command = get_automatic_ut_from_xml_description(each_dom)
                            except:
                                logger.error("Unit test for command %s must be fixed: %s" % (available_app , traceback.format_exc()))
                else:
                    logger.warning("%s is not in white list." % available_app)

            else:
                if available_app in white_list and not available_app in black_list:
                    logger.warning("There is no adaptor for %s, check white list and versions" % available_app)
                    # TODO Remove this default code when all apps are tested...
                    fh = open("description/%s.xml" % available_app, "w")
                    the_root = get_xml_description_from_application_name(available_app)
                    ET.ElementTree(the_root).write(fh)
                    fh.close()
                    try:
                        ut_command = get_automatic_ut_from_xml_description(the_root)
                    except:
                        logger.error("Unit test for command %s must be fixed: %s" % (available_app , traceback.format_exc()))

        except Exception, e:
            logger.error(traceback.format_exc())

    # Second pass: render one HTML documentation page per application.
    for available_app in otbApplication.Registry.GetAvailableApplications():
        try:
            fh = open("description/doc/%s.html" % available_app, "w")
            app_instance = otbApplication.Registry.CreateApplication(available_app)
            app_instance.UpdateParameters()
            ct = describe_app(app_instance)
            fh.write(ct)
            fh.close()
        except Exception, e:
            logger.error(traceback.format_exc())

    # Split applications ("App-variant.xml") share the HTML page of their
    # base application: copy App.html to App-variant.html.
    sub_algo = [each for each in os.listdir("description") if "-" in each and ".xml" in each]
    for key in sub_algo:
        shutil.copy("description/doc/%s" % key.split("-")[0] + ".html","description/doc/%s" % key.split(".")[0] + ".html")
+
if __name__ == "__main__":
    # Script entry point: regenerate every OTB XML descriptor and HTML page.
    create_xml_descriptors()
diff --git a/python/plugins/processing/otb/maintenance/OTBSpecific_XMLcreation.py b/python/plugins/processing/otb/maintenance/OTBSpecific_XMLcreation.py
new file mode 100644
index 000000000000..39ac507f0809
--- /dev/null
+++ b/python/plugins/processing/otb/maintenance/OTBSpecific_XMLcreation.py
@@ -0,0 +1,681 @@
+# -*- coding: utf-8 -*-
+
+"""
+***************************************************************************
+ OTBUtils.py
+ ---------------------
+ Date : 11-12-13
+ Copyright : (C) 2013 by CS Systemes d'information (CS SI)
+ Email : otb at c-s dot fr (CS SI)
+ Contributors : Julien Malik (CS SI) - creation of otbspecific
+ Oscar Picas (CS SI) -
+ Alexia Mondot (CS SI) - split otbspecific into 2 files
+ add functions
+***************************************************************************
+* *
+* This program is free software; you can redistribute it and/or modify *
+* it under the terms of the GNU General Public License as published by *
+* the Free Software Foundation; either version 2 of the License, or *
+* (at your option) any later version. *
+* *
+***************************************************************************
+
+When QGIS is run, OTB algorithms are created according to xml files from description/ directory.
+"""
+
+__author__ = 'Julien Malik, Oscar Picas, Alexia Mondot'
+__date__ = 'December 2013'
+__copyright__ = '(C) 2013, CS Systemes d\'information (CS SI)'
+# This will get replaced with a git SHA1 when you do a git archive
+__revision__ = '$Format:%H$'
+__version__ = "3.8"
+
+import copy
+
+try:
+ import processing
+except ImportError, e:
+ raise Exception("Processing must be installed and available in PYTHONPATH")
+
+try:
+ import otbApplication
+except ImportError, e:
+ raise Exception("OTB python plugins must be installed and available in PYTHONPATH")
+
+from processing.core.ProcessingLog import ProcessingLog
+from processing.core.ProcessingConfig import ProcessingConfig
+
+from processing.otb.OTBUtils import (renameValueField,
+ remove_dependant_choices,
+ remove_other_choices,
+ remove_parameter_by_key,
+ defaultSplit,
+ split_by_choice,
+ defaultWrite,
+ remove_choice,
+ remove_independant_choices )
+
+
def getBinaryMorphologicalOperation(available_app, original_dom_document):
    """Keep 'ball' as the only structuring element, expose a single radius,
    and split the application by filter (dilate, erode, opening, closing)."""
    doc = original_dom_document
    renameValueField(doc, 'structype.ball.xradius', 'name', 'The Structuring Element Radius')
    renameValueField(doc, 'structype.ball.xradius', 'description', 'The Structuring Element Radius')
    remove_dependant_choices(doc, 'structype', 'ball')
    remove_other_choices(doc, 'structype', 'ball')
    remove_dependant_choices(doc, 'filter', 'dilate')
    # Only one radius is exposed; drop the y component.
    remove_parameter_by_key(doc, 'structype.ball.yradius')
    return defaultSplit(available_app, doc, 'filter')
+
+
def getEdgeExtraction(available_app, original_dom_document):
    """Expose a single radius for the touzi filter and split the application
    by filter type (gradient, sobel, touzi)."""
    doc = original_dom_document
    renameValueField(doc, 'filter.touzi.xradius', 'name', 'The Radius')
    renameValueField(doc, 'filter.touzi.xradius', 'description', 'The Radius')
    remove_parameter_by_key(doc, 'filter.touzi.yradius')
    per_filter = split_by_choice(doc, 'filter')
    results = []
    for filter_name, filter_doc in per_filter.items():
        defaultWrite('%s-%s' % (available_app, filter_name), filter_doc)
        results.append(filter_doc)
    return results
+
+
def getGrayScaleMorphologicalOperation(available_app, original_dom_document):
    """Keep 'ball' as the only structuring element, expose a single radius,
    and split the application by filter (dilate, erode, opening, closing)."""
    doc = original_dom_document
    renameValueField(doc, 'structype.ball.xradius', 'name', 'The Structuring Element Radius')
    renameValueField(doc, 'structype.ball.xradius', 'description', 'The Structuring Element Radius')
    remove_dependant_choices(doc, 'structype', 'ball')
    remove_other_choices(doc, 'structype', 'ball')
    remove_parameter_by_key(doc, 'structype.ball.yradius')
    return defaultSplit(available_app, doc, 'filter')
+
+
def getOrthoRectification(available_app, original_dom_document):
    """Adapt OrthoRectification for Processing.

    Keeps only the 'auto' output mode, removes parameters that are only
    known once the input file is set, and splits the application by output
    SRS: EPSG, UTM, lambert-WGS84 and fit-to-ortho; each resulting app has
    its parameters adjusted in this function.
    GEOID and DEM parameters are deleted because they are not updated when
    the OTB algorithms are created at QGIS startup; their values are taken
    from the Processing settings instead.
    """
    the_root = original_dom_document

    remove_choice(the_root, 'outputs.mode', 'auto')
    remove_independant_choices(the_root, 'outputs.mode', 'auto')
    remove_choice(the_root, 'outputs.mode', 'outputroi')
    remove_independant_choices(the_root, 'outputs.mode', 'outputroi')
    # These are computed from the input image and must not be exposed.
    for key in ('outputs.ulx', 'outputs.uly', 'outputs.sizex', 'outputs.sizey',
                'outputs.spacingx', 'outputs.spacingy', 'outputs.lrx',
                'outputs.lry', 'opt.rpc'):
        remove_parameter_by_key(the_root, key)

    deleteGeoidSrtm(the_root)

    remove_parameter_by_key(the_root, 'outputs.isotropic')

    # Copy taken before the 'orthofit' removal: used for the fit-to-ortho variant.
    emptyMap = copy.deepcopy(the_root)

    remove_parameter_by_key(the_root, 'outputs.ortho')
    remove_choice(the_root, 'outputs.mode', 'orthofit')
    remove_independant_choices(the_root, 'outputs.mode', 'orthofit')
    # Copy used for the lambert-WGS84 variant.
    merged = copy.deepcopy(the_root)

    splitted = split_by_choice(the_root, 'map')
    the_list = []

    for key in splitted:
        if key == 'utm':
            the_doc = splitted[key]
            remove_parameter_by_key(the_doc, 'map.epsg.code')
            defaultWrite('%s-%s' % (available_app, key), the_doc)
            the_list.append(the_doc)
        elif key == 'epsg':
            the_doc = splitted[key]
            remove_parameter_by_key(the_doc, 'map.utm.northhem')
            remove_parameter_by_key(the_doc, 'map.utm.zone')
            defaultWrite('%s-%s' % (available_app, key), the_doc)
            the_list.append(the_doc)

    remove_choice(merged, 'map', 'utm')
    remove_choice(merged, 'map', 'epsg')
    remove_parameter_by_key(merged, 'map.epsg.code')
    remove_parameter_by_key(merged, 'map.utm.northhem')
    remove_parameter_by_key(merged, 'map.utm.zone')
    old_app_name = merged.find('key').text
    merged.find('key').text = '%s-%s' % (old_app_name, 'lambert-WGS84')
    old_longname = merged.find('longname').text
    # FIX: the long name is derived from the previous long name; the
    # original used old_app_name here and left old_longname unused.
    merged.find('longname').text = '%s (%s)' % (old_longname, 'lambert-WGS84')
    defaultWrite('%s-%s' % (available_app, 'lambert-WGS84'), merged)
    the_list.append(merged)

    remove_parameter_by_key(emptyMap, 'map')
    remove_parameter_by_key(emptyMap, 'map.epsg.code')
    remove_parameter_by_key(emptyMap, 'map.utm.northhem')
    remove_parameter_by_key(emptyMap, 'map.utm.zone')
    remove_choice(emptyMap, 'outputs.mode', 'autosize')
    remove_independant_choices(emptyMap, 'outputs.mode', 'autosize')
    remove_choice(emptyMap, 'outputs.mode', 'autospacing')
    remove_independant_choices(emptyMap, 'outputs.mode', 'autospacing')
    old_app_name = emptyMap.find('key').text
    emptyMap.find('key').text = '%s-%s' % (old_app_name, 'fit-to-ortho')
    old_longname = emptyMap.find('longname').text
    emptyMap.find('longname').text = '%s (%s)' % (old_longname, 'fit-to-ortho')
    defaultWrite('%s-%s' % (available_app, 'fit-to-ortho'), emptyMap)
    the_list.append(emptyMap)

    return the_list
+
+
def getDimensionalityReduction(available_app, original_dom_document):
    """Drop the rescale bounds and split by reduction method (ica, maf,
    napca, pca); the 'maf' variant has no inverse-transform output."""
    doc = original_dom_document
    remove_parameter_by_key(doc, 'rescale.outmin')
    remove_parameter_by_key(doc, 'rescale.outmax')
    per_method = split_by_choice(doc, 'method')
    results = []
    for method, method_doc in per_method.items():
        if method == 'maf':
            remove_parameter_by_key(method_doc, 'outinv')
        defaultWrite('%s-%s' % (available_app, method), method_doc)
        results.append(method_doc)
    return results
+
+
def getPansharpening(available_app, original_dom_document):
    """Split Pansharpening by fusion method (bayes, lmvm, rcs)."""
    per_method = split_by_choice(original_dom_document, 'method')
    results = []
    for method, method_doc in per_method.items():
        defaultWrite('%s-%s' % (available_app, method), method_doc)
        results.append(method_doc)
    return results
+
+
def getPixelValue(available_app, original_dom_document):
    """Hide the channel-list parameter and write the descriptor."""
    doc = original_dom_document
    remove_parameter_by_key(doc, 'cl')
    defaultWrite(available_app, doc)
    return [doc]
+
+
def getExtractROI(available_app, original_dom_document):
    """Split ExtractROI by mode (standard, fit) and adapt each variant.

    GEOID and DEM parameters are deleted because they are not updated when
    the OTB algorithms are created at QGIS startup; their values are taken
    from the Processing settings instead.
    """
    doc = original_dom_document
    remove_parameter_by_key(doc, 'cl')
    deleteGeoidSrtm(doc)
    per_mode = split_by_choice(doc, 'mode')
    results = []
    for mode, mode_doc in per_mode.items():
        if mode == 'standard':
            # 'standard' takes explicit coordinates: drop the 'fit' options.
            doomed = ('mode.fit.elev.dem', 'mode.fit.elev.geoid',
                      'mode.fit.elev.default', 'mode.fit.ref')
        else:
            # 'fit' derives the extent itself: drop the manual coordinates.
            doomed = ('startx', 'starty', 'sizex', 'sizey')
        for key in doomed:
            remove_parameter_by_key(mode_doc, key)
        defaultWrite('%s-%s' % (available_app, mode), mode_doc)
        results.append(mode_doc)
    return results
+
+
def getQuicklook(available_app, original_dom_document):
    """Hide the channel-list parameter and write the descriptor."""
    doc = original_dom_document
    remove_parameter_by_key(doc, 'cl')
    defaultWrite(available_app, doc)
    return [doc]
+
+
def getRigidTransformResample(available_app, original_dom_document):
    """Split by transformation type (id, rotation, translation)."""
    per_transform = split_by_choice(original_dom_document, 'transform.type')
    results = []
    for transform, transform_doc in per_transform.items():
        defaultWrite('%s-%s' % (available_app, transform), transform_doc)
        results.append(transform_doc)
    return results
+
+
def getHomologousPointsExtraction(available_app, original_dom_document):
    """Split the application by matching mode."""
    return defaultSplit(available_app, original_dom_document, 'mode')
+
+
def getGenerateRPCSensorModel(available_app, original_dom_document):
    """Keep 'wgs' as the only map projection and write the descriptor."""
    doc = original_dom_document
    remove_dependant_choices(doc, 'map', 'wgs')
    remove_other_choices(doc, 'map', 'wgs')
    defaultWrite(available_app, doc)
    return [doc]
+
+
def getRefineSensorModel(available_app, original_dom_document):
    """Keep 'wgs' as the only map projection and write the descriptor."""
    doc = original_dom_document
    remove_dependant_choices(doc, 'map', 'wgs')
    remove_other_choices(doc, 'map', 'wgs')
    defaultWrite(available_app, doc)
    return [doc]
+
+
def getSegmentation(available_app, original_dom_document):
    """Remove the 'raster' output mode and split by segmentation filter
    (cc, edison, meanshift, mprofiles, watershed)."""
    doc = original_dom_document
    remove_choice(doc, 'mode', 'raster')
    remove_independant_choices(doc, 'mode', 'raster')
    per_filter = split_by_choice(doc, 'filter')
    results = []
    for filter_name, filter_doc in per_filter.items():
        defaultWrite('%s-%s' % (available_app, filter_name), filter_doc)
        results.append(filter_doc)
    return results
+
+
def getKMeansClassification(available_app, original_dom_document):
    """Hide the random-seed parameter and write the descriptor."""
    doc = original_dom_document
    remove_parameter_by_key(doc, 'rand')
    defaultWrite(available_app, doc)
    return [doc]
+
+
def getTrainSVMImagesClassifier(available_app, original_dom_document):
    """Hide the random-seed parameter and write the descriptor."""
    doc = original_dom_document
    remove_parameter_by_key(doc, 'rand')
    defaultWrite(available_app, doc)
    return [doc]
+
+
def getComputeConfusionMatrix(available_app, original_dom_document):
    """Split by reference type (raster, vector).

    The original function carried an unreachable `return [the_root]` after
    the loop's return, plus dead commented-out code; both removed.
    """
    the_root = original_dom_document
    splitted = split_by_choice(the_root, 'ref')
    the_list = []
    for key in splitted:
        defaultWrite('%s-%s' % (available_app, key), splitted[key])
        the_list.append(splitted[key])
    return the_list
+
+
def getOpticalCalibration(available_app, original_dom_document):
    """Keep only the 'toa' calibration level and hide every atmospheric
    correction parameter."""
    doc = original_dom_document
    remove_independant_choices(doc, 'level', 'toc')
    remove_choice(doc, 'level', 'toc')
    for key in ('atmo.aerosol', 'atmo.oz', 'atmo.wa', 'atmo.pressure',
                'atmo.opt', 'atmo.aeronet', 'radius'):
        remove_parameter_by_key(doc, key)
    defaultWrite(available_app, doc)
    return [doc]
+
+
def getSarRadiometricCalibration(available_app, original_dom_document):
    """Write the descriptor unchanged.

    TODO: before doing anything more, check support for SAR data in QGIS.
    """
    doc = original_dom_document
    defaultWrite(available_app, doc)
    return [doc]
+
+
def getSmoothing(available_app, original_dom_document):
    """Split by smoothing type (anidif, gaussian, mean)."""
    per_type = split_by_choice(original_dom_document, 'type')
    results = []
    for type_name, type_doc in per_type.items():
        defaultWrite('%s-%s' % (available_app, type_name), type_doc)
        results.append(type_doc)
    return results
+
def getColorMapping(available_app, original_dom_document):
    """Remove the 'colortolabel' operation, split by method (custom,
    continuous, optimal, image) and strip from every split the parameters
    belonging to the other methods."""
    doc = original_dom_document
    remove_independant_choices(doc, 'op', 'colortolabel')
    remove_choice(doc, 'op', 'colortolabel')
    # Parameters owned by each method; a split keeps only its own group.
    method_params = {
        'custom': ('method.custom.lut',),
        'continuous': ('method.continuous.lut', 'method.continuous.min',
                       'method.continuous.max'),
        'optimal': ('method.optimal.background',),
        'image': ('method.image.in', 'method.image.low', 'method.image.up'),
    }
    per_method = split_by_choice(doc, 'method')
    results = []
    for key in per_method:
        method_doc = per_method[key]
        # Any unrecognised key is handled like 'image' (the original else branch).
        owner = key if key in ('custom', 'continuous', 'optimal') else 'image'
        for other_method, params in method_params.items():
            if other_method != owner:
                for param in params:
                    remove_parameter_by_key(method_doc, param)
        defaultWrite('%s-%s' % (available_app, key), method_doc)
        results.append(method_doc)
    return results
+
+
+
def getFusionOfClassifications(available_app, original_dom_document):
    """Split by fusion method (dempstershafer, majorityvoting)."""
    per_method = split_by_choice(original_dom_document, 'method')
    results = []
    for method, method_doc in per_method.items():
        defaultWrite('%s-%s' % (available_app, method), method_doc)
        results.append(method_doc)
    return results
+
+
def getTrainImagesClassifier(available_app, original_dom_document):
    """Split by classifier (ann, bayes, boost, dt, gbt, knn, libsvm, rf, svm).

    GEOID and DEM parameters are deleted because they are not updated when
    the OTB algorithms are created at QGIS startup; their values are taken
    from the Processing settings instead.
    """
    doc = original_dom_document
    deleteGeoidSrtm(doc)
    per_classifier = split_by_choice(doc, 'classifier')
    results = []
    for classifier, classifier_doc in per_classifier.items():
        defaultWrite('%s-%s' % (available_app, classifier), classifier_doc)
        results.append(classifier_doc)
    return results
+
+
+
def getLineSegmentDetection(available_app, original_dom_document):
    """Hide the elevation (default/GEOID/DEM) parameters, whose values come
    from the Processing settings, and write the descriptor."""
    doc = original_dom_document
    for key in ('elev.default', 'elev.geoid', 'elev.dem'):
        remove_parameter_by_key(doc, key)
    defaultWrite(available_app, doc)
    return [doc]
+
+
+
def getImageEnvelope(available_app, original_dom_document):
    """Hide the elevation (default/GEOID/DEM) parameters, whose values come
    from the Processing settings, and write the descriptor."""
    doc = original_dom_document
    for key in ('elev.default', 'elev.geoid', 'elev.dem'):
        remove_parameter_by_key(doc, key)
    defaultWrite(available_app, doc)
    return [doc]
+
+
def getReadImageInfo(available_app, original_dom_document):
    """Remove every parameter that is an output of the application and
    write the descriptor."""
    doc = original_dom_document
    output_keys = ('outkwl', 'indexx', 'indexy', 'sizex', 'sizey',
                   'spacingx', 'spacingy', 'originx', 'originy',
                   'estimatedgroundspacingx', 'estimatedgroundspacingy',
                   'numberbands', 'sensor', 'id', 'time',
                   'ullat', 'ullon', 'urlat', 'urlon',
                   'lrlat', 'lrlon', 'lllat', 'lllon',
                   'town', 'country', 'rgb.r', 'rgb.g', 'rgb.b',
                   'projectionref', 'keyword', 'gcp.count', 'gcp.proj')
    for key in output_keys:
        remove_parameter_by_key(doc, key)
    defaultWrite(available_app, doc)
    return [doc]
+
+
+
def getComputeModulusAndPhase(available_app, original_dom_document):
    """
    Split the ComputeModulusAndPhase descriptor according to the 'nbinput'
    choice and give each resulting application a distinct key and longname.

    Returns the list of per-choice DOM documents.
    """
    the_root = original_dom_document
    splitted = split_by_choice(the_root, 'nbinput')
    the_list = []
    for key in splitted:
        # The 'one' choice takes a single input image; any other takes two.
        suffix = 'OneEntry' if key == 'one' else 'TwoEntries'
        the_doc = splitted[key]
        old_app_name = the_doc.find('key').text
        the_doc.find('key').text = '%s-%s' % (old_app_name, suffix)
        old_longname = the_doc.find('longname').text
        # Bug fix: the new longname must derive from the previous longname;
        # the original code built it from the application key and left
        # old_longname unused.
        the_doc.find('longname').text = '%s (%s)' % (old_longname, suffix)
        defaultWrite('%s-%s' % (available_app, suffix), the_doc)
        the_list.append(the_doc)
    return the_list
+
+
def getCompareImages(available_app, original_dom_document):
    """
    Drop mse, mae and psnr from the CompareImages descriptor: these are
    outputs of the algorithm, not user-settable parameters.
    """
    doc = original_dom_document
    for output_key in ('mse', 'mae', 'psnr'):
        remove_parameter_by_key(doc, output_key)
    defaultWrite(available_app, doc)
    return [doc]
+
+
def getRadiometricIndices(available_app, original_dom_document):
    """
    Remove three indices (laindvilog, lairefl, laindviformo) from the
    'list' choice of the RadiometricIndices descriptor: they are missing
    from the application, so offering them would fail at run time.
    """
    root = original_dom_document
    for missing_index in ('laindvilog', 'lairefl', 'laindviformo'):
        remove_choice(root, 'list', missing_index)
    defaultWrite(available_app, root)
    return [root]
+
+
def getConnectedComponentSegmentation(available_app, original_dom_document):
    """
    Drop the geoid/DEM parameters from the ConnectedComponentSegmentation
    descriptor; those values are read from the plugin settings because
    they are not refreshed when the algorithms are generated at startup.
    """
    doc = original_dom_document
    deleteGeoidSrtm(doc)
    defaultWrite(available_app, doc)
    return [doc]
+
+
def getKmzExport(available_app, original_dom_document):
    """
    Strip the geoid/DEM parameters from the KmzExport descriptor — the
    plugin settings provide them, the generated XML does not stay current.
    """
    root = original_dom_document
    deleteGeoidSrtm(root)
    defaultWrite(available_app, root)
    return [root]
+
+
def getSuperimpose(available_app, original_dom_document):
    """
    Strip the geoid/DEM parameters from the Superimpose descriptor; the
    values are picked up from the plugin settings at run time.
    """
    doc = original_dom_document
    deleteGeoidSrtm(doc)
    defaultWrite(available_app, doc)
    return [doc]
+
+
def getStereoFramework(available_app, original_dom_document):
    """
    Strip the geoid/DEM parameters from the StereoFramework descriptor;
    the plugin settings supply these values instead of the generated XML.
    """
    root = original_dom_document
    deleteGeoidSrtm(root)
    defaultWrite(available_app, root)
    return [root]
+
+
+
def deleteGeoidSrtm(doc):
    """
    Remove every <parameter> whose <key> ends with 'elev.geoid' or
    'elev.dem' from doc.

    These parameters are not refreshed when the OTB algorithms are created
    at QGIS startup; their values are read from the plugin settings instead.

    :param doc: an ElementTree Element whose <parameter> children each
        contain a <key> child.
    """
    # NOTE(review): './/parameter' matches parameters at any depth, but
    # Element.remove() only removes direct children -- this assumes all
    # <parameter> nodes are immediate children of doc. TODO confirm for
    # every generated descriptor.
    for suffix in ('elev.geoid', 'elev.dem'):
        doomed = [node for node in doc.findall('.//parameter')
                  if node.find('key').text.endswith(suffix)]
        for node in doomed:
            doc.remove(node)
diff --git a/python/plugins/processing/otb/maintenance/OTBTester.py b/python/plugins/processing/otb/maintenance/OTBTester.py
new file mode 100644
index 000000000000..9701df546df4
--- /dev/null
+++ b/python/plugins/processing/otb/maintenance/OTBTester.py
@@ -0,0 +1,432 @@
+# -*- coding: utf-8 -*-
+
+"""
+***************************************************************************
+ OTBTester.py
+ ---------------------
+ Copyright : (C) 2013 by CS Systemes d'information (CS SI)
+ Email : otb at c-s dot fr (CS SI)
+ Contributors : Julien Malik (CS SI)
+ Oscar Picas (CS SI)
+***************************************************************************
+* *
+* This program is free software; you can redistribute it and/or modify *
+* it under the terms of the GNU General Public License as published by *
+* the Free Software Foundation; either version 2 of the License, or *
+* (at your option) any later version. *
+* *
+***************************************************************************
+"""
+__author__ = 'Julien Malik, Oscar Picas'
+__copyright__ = '(C) 2013, CS Systemes d\'information (CS SI)'
+# This will get replaced with a git SHA1 when you do a git archive
+__revision__ = '$Format:%H$'
+
+import unittest
+import ConfigParser
+import io
+
+from parsing import (
+ File, Command, Comment, BlankLine, Arg, parse, prettify)
+
+from string import Template
+import os
+import traceback
+import logging
+import copy
+
+from ConfigParser import SafeConfigParser
+
+from processing.otb.OTBHelper import get_OTB_log
+
class LowerTemplate(Template):
    """A string.Template whose safe_substitute() result is lower-cased."""

    def safe_substitute(self, param):
        """Substitute like Template.safe_substitute, then lower-case the result."""
        return super(LowerTemplate, self).safe_substitute(param).lower()
+
class MakefileParser(object):
    """
    Extracts the OTB test definitions (application command line plus
    validation line) from the CMakeLists.txt files of an OTB source tree,
    resolving CMake SET/STRING/FOREACH variables along the way.

    Paths are read from 'otbcfg.ini' in the current working directory
    (section [otb], options checkout_dir and build_dir).
    """
    def __init__(self):
        # maxDiff mirrors the unittest.TestCase attribute; harmless here.
        self.maxDiff = None
        self.parser = SafeConfigParser()
        # read() silently ignores a missing file; existence is checked below.
        self.parser.read('otbcfg.ini')
        if not os.path.exists('otbcfg.ini'):
            raise Exception("OTB_SOURCE_DIR and OTB_BINARY_DIR must be specified in the file otbcfg.ini")

        self.root_dir = self.parser.get('otb','checkout_dir')
        if not os.path.exists(self.root_dir):
            raise Exception("Check otbcfg.ini : OTB_SOURCE_DIR and OTB_BINARY_DIR must be specified there")
        self.build_dir = self.parser.get('otb', 'build_dir')
        if not os.path.exists(self.build_dir):
            raise Exception("Check otbcfg.ini : OTB_SOURCE_DIR and OTB_BINARY_DIR must be specified there")
        self.logger = get_OTB_log()

    def test_CMakelists(self):
        """
        Parse the top-level CMakeLists.txt of the OTB checkout and return a
        dict of resolved CMake variables (FIND_PATH / SET / STRING).
        """
        provided = {}
        provided["OTB_SOURCE_DIR"] = self.root_dir
        provided["OTB_BINARY_DIR"] = self.build_dir
        provided["OTB_DATA_LARGEINPUT_ROOT"] = os.path.normpath(os.path.join(self.root_dir, "../OTB-Data/Input"))

        try:
            with open(os.path.join(self.root_dir, "CMakeLists.txt")) as file_input:
                content = file_input.read()
                output = parse(content)

                # FIND_PATH(VAR file [suggested dirs...]) commands.
                defined_paths = [each for each in output if 'Command' in str(type(each)) and "FIND_PATH" in each.name]
                the_paths = {key.body[0].contents: [thing.contents for thing in key.body[1:]] for key in defined_paths}

                # SET(VAR value...) commands, values joined by spaces.
                the_sets = [each for each in output if 'Command' in str(type(each)) and "SET" in each.name.upper()]
                the_sets = {key.body[0].contents: [thing.contents for thing in key.body[1:]] for key in the_sets}
                the_sets = {key : " ".join(the_sets[key]) for key in the_sets}

                # Variables produced by STRING(...) commands (last argument
                # is the destination variable); their values get lower-cased.
                the_strings = set([each.body[-1].contents for each in output if 'Command' in str(type(each)) and "STRING" in each.name.upper()] )

                def mini_clean(item):
                    # Strip surrounding quotes from single-word quoted values.
                    if item.startswith('"') and item.endswith('"') and " " not in item:
                        return item[1:-1]
                    return item

                the_sets = {key : mini_clean(the_sets[key]) for key in the_sets}

                def templatize(item):
                    # Values still containing '$' become Templates for later
                    # substitution by resolve_dict/autoresolve.
                    if "$" in item:
                        return Template(item)
                    return item

                for key in the_sets:
                    if key in the_strings:
                        the_sets[key] = the_sets[key].lower()

                the_sets = {key : templatize(the_sets[key]) for key in the_sets}

                for path in the_paths:
                    target_file = the_paths[path][1]
                    suggested_paths = []
                    if len(the_paths[path]) > 2:
                        suggested_paths = the_paths[path][2:]

                    try:
                        # Prefer an actual filesystem search from the cwd.
                        provided[path] = find_file(target_file)
                    except Exception, e:
                        # Fall back to the hint directories from FIND_PATH.
                        for each in suggested_paths:
                            st = Template(each)
                            pac = os.path.abspath(st.safe_substitute(provided))
                            if os.path.exists(pac):
                                provided[path] = pac
                                break

                resolve_dict(provided, the_sets)
                provided.update(the_sets)

                return provided
        except Exception, e:
            traceback.print_exc()
            # NOTE(review): MakefileParser does not define fail(); this
            # assumes a unittest.TestCase-style mixin -- confirm usage.
            self.fail(e.message)

    def add_make(self, previous_context, new_file):
        """
        Parse the CMakeLists file new_file and fold its SET/STRING variable
        definitions into previous_context (mutated and returned).
        """
        input = open(new_file).read()
        output = parse(input)
        apps = [each for each in output if 'Command' in str(type(each))]
        setcommands = [each for each in apps if 'SET' in each.name.upper()]
        stringcommands = [each for each in apps if 'STRING' in each.name.upper()]

        environment = previous_context

        def mini_clean(item):
            # Strip surrounding quotes from single-word quoted values.
            if item.startswith('"') and item.endswith('"') and " " not in item:
                return item[1:-1]
            return item

        new_env = {}
        for command in setcommands:
            key = command.body[0].contents
            ct = " ".join([item.contents for item in command.body[1:]])
            ct = mini_clean(ct)

            if "$" in ct:
                values = Template(ct)
            else:
                values = ct

            new_env[key] = values

        for stringcommand in stringcommands:
            # STRING(... <value> <destvar>) : destination is the last arg.
            key = stringcommand.body[-1].contents
            ct = stringcommand.body[-2].contents
            ct = mini_clean(ct.lower())

            if "$" in ct:
                values = LowerTemplate(ct)
            else:
                values = ct
            new_env[key] = values

        resolve_dict(environment, new_env)
        environment.update(new_env)

        return environment

    def get_apps(self, the_makefile, the_dict):
        """Return the OTB_TEST_APPLICATION command nodes of the_makefile."""
        input = open(the_makefile).read()
        output = parse(input)
        apps = [each for each in output if 'Command' in str(type(each))]
        otb_apps = [each for each in apps if 'OTB_TEST_APPLICATION' in each.name.upper()]
        return otb_apps

    def get_tests(self, the_makefile, the_dict):
        """Return the ADD_TEST command nodes of the_makefile."""
        input = open(the_makefile).read()
        output = parse(input)
        apps = [each for each in output if 'Command' in str(type(each))]
        otb_tests = [each for each in apps if 'ADD_TEST' in each.name.upper()]
        return otb_tests

    def get_apps_with_context(self, the_makefile, the_dict):
        """
        Return (command, foreach_context) pairs for every OTB_TEST_APPLICATION
        in the_makefile; the context is the stack of enclosing FOREACH args.
        """
        input = open(the_makefile).read()
        output = parse(input)

        def is_a_command(item):
            return 'Command' in str(type(item))

        appz = []
        context = []
        for each in output:
            if is_a_command(each):
                if 'FOREACH' in each.name and 'ENDFOREACH' not in each.name:
                    args = [item.contents for item in each.body]
                    context.append(args)
                elif 'ENDFOREACH' in each.name:
                    context.pop()
                elif 'OTB_TEST_APPLICATION' in each.name.upper():
                    # Snapshot the current loop nesting for this app.
                    appz.append((each, context[:]))
        return appz

    def get_name_line(self, the_list, the_dict):
        """
        Extract and fully resolve the NAME section of an OTB_TEST_APPLICATION
        body; unresolvable keys are logged and substituted with ''.
        """
        items = ('NAME', 'APP', 'OPTIONS', 'TESTENVOPTIONS', 'VALID')
        itemz = [[], [], [], [], []]
        last_index = 0
        # Bucket each argument under the most recent section keyword.
        for each in the_list:
            if each.contents in items:
                last_index = items.index(each.contents)
            else:
                itemz[last_index].append(each.contents)
        result = itemz[0][0]
        the_string = Template(result).safe_substitute(the_dict)

        if '$' in the_string:
            neo_dict = the_dict
            the_string = Template(the_string).safe_substitute(neo_dict)
            while '$' in the_string:
                try:
                    the_string = Template(the_string).substitute(neo_dict)
                except KeyError, e:
                    self.logger.warning("Key %s is not found in makefiles" % e.message)
                    neo_dict[e.message] = ""

        if 'string.Template' in the_string:
            raise Exception("Unexpected toString call in %s" % the_string)

        return the_string

    def get_command_line(self, the_list, the_dict):
        """
        Build the resolved 'otbcli_<APP> <OPTIONS> -testenv <TESTENVOPTIONS>'
        command line from an OTB_TEST_APPLICATION body.
        """
        items = ('NAME', 'APP', 'OPTIONS', 'TESTENVOPTIONS', 'VALID')
        itemz = [[], [], [], [], []]
        last_index = 0
        # Bucket each argument under the most recent section keyword.
        for each in the_list:
            if each.contents in items:
                last_index = items.index(each.contents)
            else:
                itemz[last_index].append(each.contents)
        result = []
        result.extend(["otbcli_%s" % each for each in itemz[1]])

        # len("otbcli_") == 7 : the APP section contributed an empty name.
        if len(result[0]) == 7:
            raise Exception("App name is empty !")

        result.extend(itemz[2])
        result.append("-testenv")
        result.extend(itemz[3])
        the_string = Template(" ".join(result)).safe_substitute(the_dict)

        if '$' in the_string:
            neo_dict = the_dict
            the_string = Template(" ".join(result)).safe_substitute(neo_dict)
            while '$' in the_string:
                try:
                    the_string = Template(the_string).substitute(neo_dict)
                except KeyError, e:
                    self.logger.warning("Key %s is not found in makefiles" % e.message)
                    neo_dict[e.message] = ""

        if 'string.Template' in the_string:
            raise Exception("Unexpected toString call in %s" % the_string)

        return the_string

    def get_test(self, the_list, the_dict):
        """
        Build the resolved 'otbTestDriver <VALID...>' validation command from
        an OTB_TEST_APPLICATION body; '' when there is no VALID section.
        """
        items = ('NAME', 'APP', 'OPTIONS', 'TESTENVOPTIONS', 'VALID')
        itemz = [[], [], [], [], []]
        last_index = 0
        # Bucket each argument under the most recent section keyword.
        for each in the_list:
            if each.contents in items:
                last_index = items.index(each.contents)
            else:
                itemz[last_index].append(each.contents)
        result = ["otbTestDriver"]
        result.extend(itemz[4])

        if len(result) == 1:
            return ""

        the_string = Template(" ".join(result)).safe_substitute(the_dict)

        if '$' in the_string:
            neo_dict = the_dict
            the_string = Template(" ".join(result)).safe_substitute(neo_dict)
            while '$' in the_string:
                try:
                    the_string = Template(the_string).substitute(neo_dict)
                except KeyError, e:
                    self.logger.warning("Key %s is not found in makefiles" % e.message)
                    neo_dict[e.message] = ""

        if 'string.Template' in the_string:
            raise Exception("Unexpected toString call in %s" % the_string)

        return the_string

    def test_algos(self):
        """
        Walk Testing/Applications, resolve each makefile's variable context,
        and return {test_name: (command_line, validation_line)} for every
        fully-resolved OTB_TEST_APPLICATION entry.
        """
        tests = {}

        algos_dir = os.path.join(self.root_dir, "Testing/Applications")
        makefiles = find_files("CMakeLists.txt", algos_dir)
        to_be_excluded = os.path.join(self.root_dir, "Testing/Applications/CMakeLists.txt")
        if to_be_excluded in makefiles:
            makefiles.remove(to_be_excluded)

        # For each leaf makefile, collect the chain of parent CMakeLists.txt
        # files so parent variable definitions can be folded in first.
        resolve_algos = {}
        for makefile in makefiles:
            intermediate_makefiles = []
            path = makefile.split(os.sep)[len(self.root_dir.split(os.sep)):-1]
            for ind in range(len(path)):
                tmp_path = path[:ind+1]
                tmp_path.append("CMakeLists.txt")
                tmp_path = os.sep.join(tmp_path)
                candidate_makefile = os.path.join(self.root_dir, tmp_path)
                if os.path.exists(candidate_makefile):
                    intermediate_makefiles.append(candidate_makefile)
            resolve_algos[makefile] = intermediate_makefiles

        # Build the resolved variable context for every makefile.
        dict_for_algo = {}
        for makefile in makefiles:
            basic = self.test_CMakelists()
            last_context = self.add_make(basic, os.path.join(self.root_dir, "Testing/Utilities/CMakeLists.txt"))
            for intermediate_makefile in resolve_algos[makefile]:
                last_context = self.add_make(last_context, intermediate_makefile)
            dict_for_algo[makefile] = last_context

        for makefile in makefiles:
            appz = self.get_apps_with_context(makefile, dict_for_algo[makefile])

            for app, context in appz:
                if len(context) == 0:
                    # Not inside any FOREACH: resolve once.
                    import copy
                    ddi = copy.deepcopy(dict_for_algo[makefile])
                    tk_dict = autoresolve(ddi)
                    tk_dict = autoresolve(tk_dict)

                    name_line = self.get_name_line(app.body, tk_dict)
                    command_line = self.get_command_line(app.body, tk_dict)
                    test_line = self.get_test(app.body, tk_dict)

                    if '$' in test_line or '$' in command_line:
                        if '$' in command_line:
                            self.logger.error(command_line)
                        if '$' in test_line:
                            self.logger.warning(test_line)
                    else:
                        tests[name_line] = (command_line, test_line)
                else:
                    # Inside FOREACH loop(s): expand the cartesian product of
                    # all loop variable values.
                    contexts = {}
                    for iteration in context:
                        key = iteration[0]
                        values = [each[1:-1].lower() for each in iteration[1:]]
                        contexts[key] = values

                    keyorder = contexts.keys()
                    import itertools
                    pool = [each for each in itertools.product(*contexts.values())]

                    import copy
                    for poolinstance in pool:
                        neo_dict = copy.deepcopy(dict_for_algo[makefile])
                        zipped = zip(keyorder, poolinstance)
                        for each in zipped:
                            neo_dict[each[0]] = each[1]

                        ak_dict = autoresolve(neo_dict)
                        ak_dict = autoresolve(ak_dict)
                        ak_dict = autoresolve(ak_dict)

                        ddi = ak_dict

                        name_line = self.get_name_line(app.body, ddi)
                        command_line = self.get_command_line(app.body, ddi)
                        test_line = self.get_test(app.body, ddi)

                        # NOTE(review): "'$' not in test_line" diverges from
                        # the "'$' in test_line" used in the no-context branch
                        # above and silently drops fully-resolved tests --
                        # looks inverted, confirm intent.
                        if '$' in command_line or '$' not in test_line:
                            if '$' in command_line:
                                self.logger.error(command_line)
                            if '$' in test_line:
                                self.logger.warning(test_line)
                        else:
                            tests[name_line] = (command_line, test_line)

        return tests
+
def autoresolve(a_dict):
    """
    Return a copy of a_dict with every value template-expanded once against
    a_dict itself; plain string values are treated as templates too.
    """
    def expand(value):
        # Template objects already expose safe_substitute; wrap raw strings.
        if hasattr(value, 'safe_substitute'):
            return value.safe_substitute(a_dict)
        return Template(value).safe_substitute(a_dict)

    return {key: expand(value) for key, value in a_dict.items()}
+
+
def find_file(file_name, base_dir=os.curdir):
    """
    Return the path of the first file named file_name found by a bottom-up
    walk of base_dir; raise Exception when no match exists.
    """
    for root, _dirs, files in os.walk(base_dir, topdown=False):
        if file_name in files:
            return os.path.join(root, file_name)
    raise Exception("File not found %s" % file_name)
+
def find_files(file_name, base_dir=os.curdir):
    """
    Return the list of all paths named file_name under base_dir, collected
    by a bottom-up directory walk (possibly empty).
    """
    matches = []
    for root, _dirs, files in os.walk(base_dir, topdown=False):
        matches.extend(os.path.join(root, name)
                       for name in files if name == file_name)
    return matches
+
def resolve_dict(adia, adib):
    """
    Transfer entries from adib into adia until a fixed point is reached.

    Template values are expanded against adia and adopted only when the
    result is a plain str with no remaining '$'; non-template values are
    adopted as-is. Every key adopted into adia is removed from adib, so
    unresolved templates are what remains in adib afterwards.
    """
    def _pass(dia, dib):
        for key in dib:
            candidate = dib[key]
            if hasattr(candidate, 'safe_substitute'):
                expanded = candidate.safe_substitute(dia)
                # Same str-only check as the historical code.
                if type(expanded) == type(".") and "$" not in expanded:
                    dia[key] = expanded
            else:
                dia[key] = candidate
        for key in dia:
            if key in dib:
                del dib[key]

    before, after = len(adia), len(adia) + 1
    while before != after:
        before = len(adia)
        _pass(adia, adib)
        after = len(adia)
+
diff --git a/python/plugins/processing/otb/maintenance/README b/python/plugins/processing/otb/maintenance/README
new file mode 100644
index 000000000000..dfadf450ae21
--- /dev/null
+++ b/python/plugins/processing/otb/maintenance/README
@@ -0,0 +1,11 @@
+
+Requirements
+1) qgis
+2) sextante_taudem (TauDEM-1.1.1.zip)
+3) sextante (sextante-1.0.9.zip)
+4) PYTHONPATH=.:/usr/lib/otb/python:/usr/share/qgis/python/plugins:~/.qgis2/python/plugins
+5) ITK_AUTOLOAD_PATH=/usr/lib/otb/applications
+
+Creating xml files
+cd ~/.qgis2/python/plugins/sextante/otb/maintenance
+python ./OTBHelper.py
diff --git a/python/plugins/processing/otb/maintenance/TestOTBAlgorithms.py b/python/plugins/processing/otb/maintenance/TestOTBAlgorithms.py
new file mode 100644
index 000000000000..b3a37ccef0e8
--- /dev/null
+++ b/python/plugins/processing/otb/maintenance/TestOTBAlgorithms.py
@@ -0,0 +1,202 @@
+# -*- coding: utf-8 -*-
+
+"""
+***************************************************************************
+ TestOTBAlgorithms.py
+ ---------------------
+ Copyright : (C) 2013 by CS Systemes d'information
+ Email : otb at c-s dot fr
+ Contributors : Oscar Picas
+***************************************************************************
+* *
+* This program is free software; you can redistribute it and/or modify *
+* it under the terms of the GNU General Public License as published by *
+* the Free Software Foundation; either version 2 of the License, or *
+* (at your option) any later version. *
+* *
+***************************************************************************
+"""
+
+# This will get replaced with a git SHA1 when you do a git archive
+__revision__ = '$Format:%H$'
+
+import unittest
+import signal
+import sys
+import os
+import traceback
+import xml.etree.ElementTree as ET
+import shlex, subprocess
+import shelve
+
+try:
+ import processing
+except ImportError, e:
+ raise Exception("Processing must be installed and available in PYTHONPATH")
+
+try:
+ import otbApplication
+except ImportError, e:
+ raise Exception("OTB python plugins must be installed and available in PYTHONPATH")
+
+from processing.otb.OTBAlgorithm import OTBAlgorithm
+from processing.otb.OTBHelper import *
+from processing.otb.OTBTester import MakefileParser
+
class Alarm(Exception):
    """Raised by alarm_handler when a test exceeds its SIGALRM deadline."""
    pass
+
def alarm_handler(signum, frame):
    """SIGALRM handler: abort the currently running test by raising Alarm."""
    raise Alarm
+
class AlgoTestCase(unittest.TestCase):
    """
    Base test case providing the OTB logger and the list of generated
    description/*.xml files found under the current working directory.
    """

    def setUp(self):
        self.logger = get_OTB_log()
        description_dir = os.path.join(os.path.abspath(os.curdir), 'description')
        self.the_files = [os.path.join(description_dir, name)
                          for name in os.listdir(description_dir)
                          if '.xml' in name]

    def tearDown(self):
        self.logger = None
+
class TestSequence(unittest.TestCase):
    """
    Container class onto which tests are attached dynamically; every test
    shares a writeback shelve ('tests.shelve') caching previous outcomes.
    """

    def setUp(self):
        self.data = shelve.open("tests.shelve", writeback=True)

    def tearDown(self):
        self.data.close()
+
def ut_generator(test_name, a_tuple):
    """
    Build a TestSequence test method for one makefile entry.

    :param test_name: shelve key under which the outcome is cached.
    :param a_tuple: (command_line, validation_line) as produced by
        MakefileParser.test_algos().
    """
    def test(self):
        logger = get_OTB_log()

        # Re-run only when the cached entry is missing, the command lines
        # changed, or the previous run did not fail cleanly.
        needs_update = False
        if test_name not in self.data:
            needs_update = True
        if test_name in self.data:
            if (self.data[test_name][0] != a_tuple[0]) or (self.data[test_name][1] != a_tuple[1]) or (self.data[test_name][2] is False):
                needs_update = True

        if needs_update:
            # Hard 6-minute cap per test via SIGALRM (raises Alarm).
            signal.signal(signal.SIGALRM, alarm_handler)
            signal.alarm(6*60) # 6 minutes

            # Placeholder: no application is blacklisted here.
            black_list = []

            ut_command = a_tuple[0]
            self.assertTrue(ut_command != None)
            self.assertTrue(ut_command != "")

            ut_command_validation = a_tuple[1]
            self.assertTrue(ut_command_validation != None)
            self.assertTrue(ut_command_validation != "")

            if ut_command.split(" ")[0] in black_list:
                raise Exception("Blacklisted test !")

            args = shlex.split(ut_command)
            failed = False
            logger.info("Running [%s]" % ut_command)
            p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            (pout, perr) = p.communicate()
            if ("ERROR" in pout or "ERROR" in perr) or ("FATAL" in pout or "FATAL" in perr) or ("CRITICAL" in pout or "CRITICAL" in perr):
                error_text = "Command [%s] returned [%s]" % (ut_command, pout)
                # Missing input data is only a warning; anything else fails.
                if "Invalid image filename" in pout or "Invalid vector data filename" in pout or "Failed to open" in pout:
                    logger.warning(error_text)
                else:
                    logger.error(error_text)
                    self.fail(error_text)
                # NOTE(review): self.fail() raises, so this assignment is
                # only reached on the warning path above.
                failed = True
            else:
                logger.info(pout)

            if (len(ut_command_validation) > 0) and not failed:
                # otbTestDriver re-executes the command and validates output.
                new_ut_command_validation = ut_command_validation + " Execute " + ut_command

                logger.info("Running Unit test [%s]" % new_ut_command_validation)
                argz = shlex.split(new_ut_command_validation)
                q = subprocess.Popen(argz, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                (qout, qerr) = q.communicate()

                if not ("Test EXIT SUCCESS" in qout or "Test EXIT SUCCESS" in qerr):
                    error_text = "Unit test [%s] returned [%s]" % (new_ut_command_validation, qout)
                    if "Invalid image filename" in qout or "Invalid vector data filename" in qout or "Failed to open" in qout:
                        logger.warning(error_text)
                    else:
                        logger.error(error_text)
                        self.fail(error_text)
                else:
                    logger.info(qout)

            # Cancel the watchdog and cache the outcome for the next run.
            signal.alarm(0)
            self.data[test_name] = [ a_tuple[0], a_tuple[1], failed ]
        else:
            logger.info("Passed test: %s" % test_name)


    return test
+
def get_client_apps():
    """
    Return the 'otbcli_<App>' command names for every installed OTB
    application (each application is instantiated and its parameters
    updated as a side effect, mirroring the historical behaviour).
    """
    clients = []
    for app_name in otbApplication.Registry.GetAvailableApplications():
        instance = otbApplication.Registry.CreateApplication(app_name)
        instance.UpdateParameters()
        clients.append("otbcli_" + app_name)
    return clients
+
def unfiltered_processing_mapping():
    """
    Attach one generated test per makefile entry to TestSequence (whether or
    not the application has a CLI client) and run the whole suite.

    Entries without a validation command line are skipped.
    """
    mkf = MakefileParser()
    the_tests = mkf.test_algos()
    for t in the_tests:
        test_name = 'test_std_%s' % t
        # Bug fix: 'skip' was only assigned when the validation entry was
        # None or empty, raising NameError for every runnable test.
        skip = the_tests[t][1] is None or the_tests[t][1] == ""

        if not skip:
            test = ut_generator(test_name, the_tests[t])
            setattr(TestSequence, test_name, test)

    suite = unittest.TestLoader().loadTestsFromTestCase(TestSequence)
    unittest.TextTestRunner(verbosity=2).run(suite)
+
def test_processing_mapping():
    """
    Attach one generated test per OTB application that has a CLI client to
    TestSequence (first makefile entry per client wins) and run the suite.
    """
    mkf = MakefileParser()
    the_tests = mkf.test_algos()
    clients = get_client_apps()

    already_tested = set()

    for t in the_tests:
        test_name = 'test_%s' % t
        runnable = the_tests[t][0].split(" ")[0]
        # Bug fix: 'skip' could be unbound (first iteration with an app not
        # in clients) or stale from a previous iteration; compute it fully
        # for every entry instead.
        skip = (runnable not in clients
                or the_tests[t][1] is None
                or the_tests[t][1] == "")

        if not skip and runnable not in already_tested:
            test = ut_generator(test_name, the_tests[t])
            setattr(TestSequence, test_name, test)
            already_tested.add(runnable)

    suite = unittest.TestLoader().loadTestsFromTestCase(TestSequence)
    unittest.TextTestRunner(verbosity=2).run(suite)
+
def test_xml_generation():
    """Regenerate the per-application XML descriptors (see OTBHelper)."""
    create_xml_descriptors()
+
if __name__ == '__main__':
    import processing
    # Attach one generated test per makefile entry to TestSequence, then let
    # unittest discover and run them.
    mkf = MakefileParser()
    the_tests = mkf.test_algos()
    for t in the_tests:
        test_name = 'test_%s' % t
        test = ut_generator(test_name, the_tests[t])
        setattr(TestSequence, test_name, test)
    unittest.main()
diff --git a/python/plugins/processing/otb/maintenance/black_list.xml b/python/plugins/processing/otb/maintenance/black_list.xml
new file mode 100644
index 000000000000..3bf5707c70dd
--- /dev/null
+++ b/python/plugins/processing/otb/maintenance/black_list.xml
@@ -0,0 +1,45 @@
+
+
+
+ HomologousPointsExtraction
+ GenerateRPCSensorModel
+ RefineSensorModel
+ Quicklook
+ PixelValue
+
+
+ SarRadiometricCalibration
+ PixelValue
+ Quicklook
+ ConvertCartoToGeoPoint
+ ConvertSensorToGeoPoint
+ ObtainUTMZoneFromGeoPoint
+ BundleToPerfectSensor
+ Example
+ ComputePolylineFeatureFromImage
+ DSFuzzyModelEstimation
+ HomologousPointsExtraction
+ SFSTextureExtraction
+ VectorDataDSValidation
+ GenerateRPCSensorModel
+ GridBasedImageResampling
+ GeneratePlyFile
+ RefineSensorModel
+ MultiResolutionPyramid
+ HyperspectralUnmixing
+ OSMDownloader
+ VertexComponentAnalysis
+ Rasterization
+ VectorDataTransform
+ VectorDataReprojection
+ VectorDataSetField
+ VectorDataExtractROIApplication
+ Convert
+ DownloadSRTMTiles
+ DisparityMapToElevationMap
+ FineRegistration
+ StereoRectificationGridGenerator
+ BlockMatching
+ SplitImage
+
+
diff --git a/python/plugins/processing/otb/maintenance/parsing.py b/python/plugins/processing/otb/maintenance/parsing.py
new file mode 100644
index 000000000000..18a6e294b3f8
--- /dev/null
+++ b/python/plugins/processing/otb/maintenance/parsing.py
@@ -0,0 +1,172 @@
+# -*- coding: utf-8 -*-
+
+"""
+***************************************************************************
+ parsing.py
+ ---------------------
+ Copyright : (C) 2013 by CS Systemes d'information (CS SI)
+ Email : otb at c-s dot fr (CS SI)
+ Contributors : Julien Malik (CS SI)
+ Oscar Picas (CS SI)
+***************************************************************************
+* *
+* This program is free software; you can redistribute it and/or modify *
+* it under the terms of the GNU General Public License as published by *
+* the Free Software Foundation; either version 2 of the License, or *
+* (at your option) any later version. *
+* *
+***************************************************************************
+"""
+__author__ = 'Julien Malik, Oscar Picas'
+__copyright__ = '(C) 2013, CS Systemes d\'information (CS SI)'
+
+from collections import namedtuple
+import re
+import os
+
def merge_pairs(list, should_merge, merge):
    """
    Return a copy of list in which each adjacent pair (a, b) satisfying
    should_merge(a, b) is replaced by merge(a, b); a merged result is not
    re-merged with its successor.

    (The first parameter keeps its historical name 'list' — it shadows the
    builtin, but the public signature must stay unchanged.)
    """
    merged_items = []
    index = 0
    last = len(list) - 1
    while index < last:
        first, second = list[index], list[index + 1]
        if should_merge(first, second):
            merged_items.append(merge(first, second))
            index += 2
        else:
            merged_items.append(first)
            index += 1
    # A trailing unpaired element is kept as-is.
    if index == last:
        merged_items.append(list[index])
    return merged_items
+
# AST node types for the parsed CMake file.
QuotedString = namedtuple('QuotedString', 'contents comments')
# _Arg/_Command are wrapped by the Arg/Command factory functions below.
_Arg = namedtuple('Arg', 'contents comments')
_Command = namedtuple('Command', 'name body comment')
# An empty line between top-level elements (carries no fields).
BlankLine = namedtuple('BlankLine', '')
+
class File(list):
    """A parsed CMakeLists file: a list of Command/Comment/BlankLine nodes."""

    def __repr__(self):
        return 'File(' + repr(list(self)) + ')'
+
class Comment(str):
    """A '#'-comment token; behaves as the raw comment string itself."""

    def __repr__(self):
        return 'Comment(' + str(self) + ')'
+
def Arg(contents, comments=None):
    """Factory for _Arg nodes; a falsy comments value becomes a fresh list."""
    return _Arg(contents, comments if comments else [])
+
def Command(name, body, comment=None):
    """Factory for _Command nodes (comment defaults to None)."""
    return _Command(name=name, body=body, comment=comment)
+
class CMakeParseError(Exception):
    """Raised when the token stream violates the CMakeLists grammar."""
    pass
+
def prettify(s):
    """
    Return the string form of the parse tree for CMakeLists contents s.

    NOTE(review): this is the repr-style rendering of the File node, not a
    true pretty-printed CMake listing -- confirm callers expect that.
    """
    tree = parse(s)
    return str(tree)
+
def parse(s):
    '''
    Parse a string in CMakeLists format into a File of top-level nodes
    (Command / Comment / BlankLine), with a trailing comment attached to
    the command sharing its line.
    '''
    numbered_nodes = list(parse_file(tokenize(s)))
    numbered_nodes = attach_comments_to_commands(numbered_nodes)
    return File([node for _, node in numbered_nodes])
+
def parse_file(toks):
    '''
    Yield (line_numbers, node) pairs for the top-level elements of a
    CMakeLists token stream: comments, blank lines (two consecutive
    newlines) and commands.

    toks must really be a generator, not a list: parse_command consumes
    further tokens from it in place.
    '''
    previous = 'newline'
    for line_num, (typ, contents) in toks:
        if typ == 'comment':
            yield ([line_num], Comment(contents))
        elif typ == 'word':
            yield parse_command(line_num, contents, toks)
        elif typ == 'newline' and previous == 'newline':
            yield ([line_num], BlankLine())
        previous = typ
+
def attach_comments_to_commands(nodes):
    """Merge each Command node with a Comment node sharing one of its lines."""
    return merge_pairs(nodes, command_then_comment, attach_comment_to_command)
+
def command_then_comment(a, b):
    """
    Truthy when node a is a Command and node b a Comment that overlap in
    line numbers (returns the overlapping set, as before, or False).
    """
    lines_a, node_a = a
    lines_b, node_b = b
    return (isinstance(node_a, _Command)
            and isinstance(node_b, Comment)
            and set(lines_a).intersection(lines_b))
+
def attach_comment_to_command(lnums_command, lnums_comment):
    """Return (lines, command) with the comment stored on a copied Command."""
    command_lines, command = lnums_command
    comment = lnums_comment[1]
    return command_lines, Command(command.name, command.body[:], comment)
+
def parse_command(start_line_num, command_name, toks):
    '''
    Consume the tokens of one command invocation ('NAME ( args... )') from
    toks and return (line_numbers, Command node).

    Raises ValueError on a nested left paren and CMakeParseError when the
    stream ends before the closing paren.
    '''
    cmd = Command(name=command_name, body=[], comment=None)
    expect('left paren', toks)
    for line_num, (typ, tok_contents) in toks:
        if typ == 'right paren':
            line_nums = range(start_line_num, line_num + 1)
            return line_nums, cmd
        elif typ == 'left paren':
            raise ValueError('Unexpected left paren at line %s' % line_num)
        elif typ in ('word', 'string'):
            cmd.body.append(Arg(tok_contents, []))
        elif typ == 'comment':
            if cmd.body:
                # Attach the comment to the most recent argument.
                cmd.body[-1].comments.append(tok_contents)
            else:
                # Bug fix: _Command has a scalar 'comment' field, not a
                # 'comments' list -- the old cmd.comments.append() raised
                # AttributeError. Keep the (last) leading comment instead.
                cmd = cmd._replace(comment=tok_contents)
    msg = 'File ended while processing command "%s" started at line %s' % (
        command_name, start_line_num)
    raise CMakeParseError(msg)
+
def expect(expected_type, toks):
    """Consume one token and raise CMakeParseError unless it matches expected_type."""
    line_num, (typ, tok_contents) = next(toks)
    if typ != expected_type:
        raise CMakeParseError('Expected a %s, but got "%s" at line %s' % (
            expected_type, tok_contents, line_num))
+
# http://stackoverflow.com/questions/691148/pythonic-way-to-implement-a-tokenizer
# Token patterns, tried in order: comments run to end of line; strings are
# double-quoted with no escape handling; parens delimit argument lists;
# words are any run free of whitespace/parens/quote/hash; newlines are kept
# as tokens (needed for blank-line detection) while other whitespace is
# dropped.
scanner = re.Scanner([
    (r'#.*', lambda scanner, token: ("comment", token)),
    (r'"[^"]*"', lambda scanner, token: ("string", token)),
    (r"\(", lambda scanner, token: ("left paren", token)),
    (r"\)", lambda scanner, token: ("right paren", token)),
    (r'[^ \t\r\n()#"]+', lambda scanner, token: ("word", token)),
    (r'\n', lambda scanner, token: ("newline", token)),
    (r"\s+", None), # skip other whitespace
])
+
def tokenize(s):
    """
    Yield (line_num, (token_type, stripped_token_contents)) pairs for the
    contents of a CMakeLists file; raise ValueError when the scanner leaves
    unconsumed input.
    """
    toks, remainder = scanner.scan(s)
    line_num = 1
    if remainder != '':
        raise ValueError('Unrecognized tokens at line %s: %s' % (line_num, remainder))
    for tok_type, tok_contents in toks:
        yield line_num, (tok_type, tok_contents.strip())
        # Newline tokens keep their '\n', so this advances the counter.
        line_num += tok_contents.count('\n')
+
diff --git a/python/plugins/processing/otb/maintenance/white_list.xml b/python/plugins/processing/otb/maintenance/white_list.xml
new file mode 100644
index 000000000000..5339156d9533
--- /dev/null
+++ b/python/plugins/processing/otb/maintenance/white_list.xml
@@ -0,0 +1,64 @@
+
+
+
+ BinaryMorphologicalOperation
+ EdgeExtraction
+ GrayScaleMorphologicalOperation
+ DimensionalityReduction
+ Pansharpening
+ ExtractROI
+ RigidTransformResample
+ Segmentation
+ KMeansClassification
+ TrainSVMImagesClassifier
+ ComputeConfusionMatrix
+ OpticalCalibration
+ SarRadiometricCalibration
+ Smoothing
+
+
+ MultivariateAlterationDetector
+ OpticalCalibration
+ StereoFramework
+ BinaryMorphologicalOperation
+ DimensionalityReduction
+ EdgeExtraction
+ GrayScaleMorphologicalOperation
+ LineSegmentDetection
+ LocalStatisticExtraction
+ RadiometricIndices
+ ConnectedComponentSegmentation
+ MeanShiftSmoothing
+ Segmentation
+ BandMath
+ ColorMapping
+ CompareImages
+ ConcatenateImages
+ ConcatenateVectorData
+ ExtractROI
+ KmzExport
+ ReadImageInfo
+ Rescale
+ Smoothing
+ TileFusion
+ Pansharpening
+ ClassificationMapRegularization
+ ComputeConfusionMatrix
+ ComputeImagesStatistics
+ FusionOfClassifications
+ ImageClassifier
+ KMeansClassification
+ SOMClassification
+ TrainImagesClassifier
+ ImageEnvelope
+ OrthoRectification
+ RigidTransformResample
+ Superimpose
+ ComputeModulusAndPhase
+ HaralickTextureExtraction
+ HooverCompareSegmentation
+ LSMSSegmentation
+ LSMSSmallRegionsMerging
+ LSMSVectorization
+
+