diff --git a/.project b/.project
index 2c20d1e5..f814688a 100644
--- a/.project
+++ b/.project
@@ -1,11 +1,11 @@
-
-
- super-csv-parent
-
-
-
-
-
-
-
-
+
+
+ super-csv-parent
+
+
+
+
+
+
+
+
diff --git a/pom.xml b/pom.xml
index 50bdc658..ea42c19d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1,481 +1,481 @@
-
-
-
- 4.0.0
-
-
-
- org.sonatype.oss
- oss-parent
- 7
-
-
- net.sf.supercsv
- super-csv-parent
- 2.0.2-SNAPSHOT
- pom
- http://supercsv.sourceforge.net
- Super CSV
- Super CSV parent project
- 2007
-
-
- super-csv
- super-csv-dozer
- super-csv-distribution
-
-
-
- UTF-8
-
-
-
-
-
-
- org.codehaus.mojo
- animal-sniffer-maven-plugin
- 1.9
-
-
- org.apache.maven.plugins
- maven-ant-plugin
- 2.3
-
-
- org.apache.maven.plugins
- maven-assembly-plugin
- 2.4
-
-
- org.apache.felix
- maven-bundle-plugin
- 2.3.7
-
-
- org.codehaus.mojo
- cobertura-maven-plugin
- 2.5.1
-
-
- org.apache.maven.plugins
- maven-compiler-plugin
- 3.0
-
-
- org.apache.maven.plugins
- maven-eclipse-plugin
- 2.9
-
-
- org.codehaus.mojo
- findbugs-maven-plugin
- 2.5.2
-
-
- org.apache.maven.plugins
- maven-jar-plugin
- 2.4
-
-
- org.apache.maven.plugins
- maven-javadoc-plugin
- 2.9
-
-
- org.apache.maven.plugins
- maven-jxr-plugin
- 2.3
-
-
- org.apache.maven.plugins
- maven-pmd-plugin
- 2.7.1
-
-
- org.apache.maven.plugins
- maven-project-info-reports-plugin
- 2.6
-
-
- org.apache.maven.plugins
- maven-release-plugin
- 2.3.2
-
-
- org.apache.maven.plugins
- maven-source-plugin
- 2.1.2
-
-
- org.apache.maven.plugins
- maven-site-plugin
- 3.2
-
-
-
- org.apache.maven.wagon
- wagon-ssh
- 2.2
-
-
-
-
- org.apache.maven.plugins
- maven-surefire-plugin
- 2.12.4
-
-
- org.apache.maven.plugins
- maven-surefire-report-plugin
- 2.12.4
-
-
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-compiler-plugin
-
-
- 1.5
-
-
-
-
-
- org.apache.felix
- maven-bundle-plugin
- true
-
-
- org.supercsv.*
-
-
-
-
- bundle-manifest
- process-classes
-
- manifest
-
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-jar-plugin
-
-
- ${project.build.outputDirectory}/META-INF/MANIFEST.MF
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-site-plugin
-
-
-
-
- org.apache.maven.plugins
- maven-eclipse-plugin
-
- true
- true
- true
-
-
- org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/J2SE-1.5
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-surefire-plugin
-
-
-
-
- org.codehaus.mojo
- cobertura-maven-plugin
-
-
- xml
- html
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-source-plugin
-
-
- attach-sources
-
- jar
-
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-javadoc-plugin
-
-
- attach-javadocs
-
- jar
-
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-release-plugin
-
- true
- clean verify
- deploy
- v@{project.version}
-
-
-
-
-
- org.codehaus.mojo
- animal-sniffer-maven-plugin
-
-
- org.codehaus.mojo.signature
- java15
- 1.0
-
-
-
-
- check-java15-compatability
- test
-
- check
-
-
-
-
-
-
-
-
-
-
-
- net.sf.supercsv
- super-csv
- ${project.version}
-
-
- net.sf.supercsv
- super-csv-dozer
- ${project.version}
-
-
-
-
-
-
- junit
- junit
- 4.10
- test
-
-
-
-
-
-
- Kasper Graversen
- kbg
-
- Project Lead/Founder
-
- +1
-
-
- James Bassett
- jamesbassett
- james.bassett@gmail.com
- +10
-
- Developer (current)
-
-
-
- Dominique De Vito
- ddv36a78
-
- Developer (past)
-
-
-
-
-
-
-
- Alf Richter (Haskell2000)
-
-
- John Gibson (noredshadow)
-
-
- Lubor Vágenknecht (lubor)
-
-
- Pete Lichten (boneshaker335)
-
-
- Thor Michael Støre (thormick)
-
-
-
-
- Super CSV
- http://supercsv.sourceforge.net/
-
-
-
-
- Apache License, Version 2.0
- http://www.apache.org/licenses/LICENSE-2.0.html
-
-
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-project-info-reports-plugin
- 2.6
-
-
-
- index
- dependencies
- project-team
- issue-tracking
- license
-
-
-
-
-
-
-
- org.codehaus.mojo
- cobertura-maven-plugin
- 2.5.1
-
-
-
-
- org.apache.maven.plugins
- maven-surefire-report-plugin
- 2.12.4
-
-
-
-
- org.apache.maven.plugins
- maven-javadoc-plugin
- 2.9
-
- true
-
- http://java.sun.com/j2se/1.5.0/docs/api
- http://dozer.sourceforge.net/apidocs
-
-
-
-
-
- javadoc
-
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-pmd-plugin
- 2.7.1
-
- 1.5
-
-
-
-
-
- org.codehaus.mojo
- findbugs-maven-plugin
- 2.5.2
-
-
-
-
- org.apache.maven.plugins
- maven-jxr-plugin
- 2.3
-
-
-
-
-
- Sourceforge
- https://sourceforge.net/p/supercsv/bugs/
-
-
-
-
- supercsv.sourceforge.net
- scp://shell.sourceforge.net/home/project-web/supercsv/htdocs
-
-
-
-
- scm:svn:https://svn.code.sf.net/p/supercsv/code/trunk
- scm:svn:svn+ssh://jamesbassett@svn.code.sf.net/p/supercsv/code/trunk
- https://sourceforge.net/p/supercsv/code/
-
+
+
+
+ 4.0.0
+
+
+
+ org.sonatype.oss
+ oss-parent
+ 7
+
+
+ net.sf.supercsv
+ super-csv-parent
+ 2.0.2-SNAPSHOT
+ pom
+ http://supercsv.sourceforge.net
+ Super CSV
+ Super CSV parent project
+ 2007
+
+
+ super-csv
+ super-csv-dozer
+ super-csv-distribution
+
+
+
+ UTF-8
+
+
+
+
+
+
+ org.codehaus.mojo
+ animal-sniffer-maven-plugin
+ 1.9
+
+
+ org.apache.maven.plugins
+ maven-ant-plugin
+ 2.3
+
+
+ org.apache.maven.plugins
+ maven-assembly-plugin
+ 2.4
+
+
+ org.apache.felix
+ maven-bundle-plugin
+ 2.3.7
+
+
+ org.codehaus.mojo
+ cobertura-maven-plugin
+ 2.5.1
+
+
+ org.apache.maven.plugins
+ maven-compiler-plugin
+ 3.0
+
+
+ org.apache.maven.plugins
+ maven-eclipse-plugin
+ 2.9
+
+
+ org.codehaus.mojo
+ findbugs-maven-plugin
+ 2.5.2
+
+
+ org.apache.maven.plugins
+ maven-jar-plugin
+ 2.4
+
+
+ org.apache.maven.plugins
+ maven-javadoc-plugin
+ 2.9
+
+
+ org.apache.maven.plugins
+ maven-jxr-plugin
+ 2.3
+
+
+ org.apache.maven.plugins
+ maven-pmd-plugin
+ 2.7.1
+
+
+ org.apache.maven.plugins
+ maven-project-info-reports-plugin
+ 2.6
+
+
+ org.apache.maven.plugins
+ maven-release-plugin
+ 2.3.2
+
+
+ org.apache.maven.plugins
+ maven-source-plugin
+ 2.1.2
+
+
+ org.apache.maven.plugins
+ maven-site-plugin
+ 3.2
+
+
+
+ org.apache.maven.wagon
+ wagon-ssh
+ 2.2
+
+
+
+
+ org.apache.maven.plugins
+ maven-surefire-plugin
+ 2.12.4
+
+
+ org.apache.maven.plugins
+ maven-surefire-report-plugin
+ 2.12.4
+
+
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-compiler-plugin
+
+
+ 1.5
+
+
+
+
+
+ org.apache.felix
+ maven-bundle-plugin
+ true
+
+
+ org.supercsv.*
+
+
+
+
+ bundle-manifest
+ process-classes
+
+ manifest
+
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-jar-plugin
+
+
+ ${project.build.outputDirectory}/META-INF/MANIFEST.MF
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-site-plugin
+
+
+
+
+ org.apache.maven.plugins
+ maven-eclipse-plugin
+
+ true
+ true
+ true
+
+
+ org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/J2SE-1.5
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-surefire-plugin
+
+
+
+
+ org.codehaus.mojo
+ cobertura-maven-plugin
+
+
+ xml
+ html
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-source-plugin
+
+
+ attach-sources
+
+ jar
+
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-javadoc-plugin
+
+
+ attach-javadocs
+
+ jar
+
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-release-plugin
+
+ true
+ clean verify
+ deploy
+ v@{project.version}
+
+
+
+
+
+ org.codehaus.mojo
+ animal-sniffer-maven-plugin
+
+
+ org.codehaus.mojo.signature
+ java15
+ 1.0
+
+
+
+
+ check-java15-compatability
+ test
+
+ check
+
+
+
+
+
+
+
+
+
+
+
+ net.sf.supercsv
+ super-csv
+ ${project.version}
+
+
+ net.sf.supercsv
+ super-csv-dozer
+ ${project.version}
+
+
+
+
+
+
+ junit
+ junit
+ 4.10
+ test
+
+
+
+
+
+
+ Kasper Graversen
+ kbg
+
+ Project Lead/Founder
+
+ +1
+
+
+ James Bassett
+ jamesbassett
+ james.bassett@gmail.com
+ +10
+
+ Developer (current)
+
+
+
+ Dominique De Vito
+ ddv36a78
+
+ Developer (past)
+
+
+
+
+
+
+
+ Alf Richter (Haskell2000)
+
+
+ John Gibson (noredshadow)
+
+
+ Lubor Vágenknecht (lubor)
+
+
+ Pete Lichten (boneshaker335)
+
+
+ Thor Michael Støre (thormick)
+
+
+
+
+ Super CSV
+ http://supercsv.sourceforge.net/
+
+
+
+
+ Apache License, Version 2.0
+ http://www.apache.org/licenses/LICENSE-2.0.html
+
+
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-project-info-reports-plugin
+ 2.6
+
+
+
+ index
+ dependencies
+ project-team
+ issue-tracking
+ license
+
+
+
+
+
+
+
+ org.codehaus.mojo
+ cobertura-maven-plugin
+ 2.5.1
+
+
+
+
+ org.apache.maven.plugins
+ maven-surefire-report-plugin
+ 2.12.4
+
+
+
+
+ org.apache.maven.plugins
+ maven-javadoc-plugin
+ 2.9
+
+ true
+
+ http://java.sun.com/j2se/1.5.0/docs/api
+ http://dozer.sourceforge.net/apidocs
+
+
+
+
+
+ javadoc
+
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-pmd-plugin
+ 2.7.1
+
+ 1.5
+
+
+
+
+
+ org.codehaus.mojo
+ findbugs-maven-plugin
+ 2.5.2
+
+
+
+
+ org.apache.maven.plugins
+ maven-jxr-plugin
+ 2.3
+
+
+
+
+
+ Sourceforge
+ https://sourceforge.net/p/supercsv/bugs/
+
+
+
+
+ supercsv.sourceforge.net
+ scp://shell.sourceforge.net/home/project-web/supercsv/htdocs
+
+
+
+
+ scm:svn:https://svn.code.sf.net/p/supercsv/code/trunk
+ scm:svn:svn+ssh://jamesbassett@svn.code.sf.net/p/supercsv/code/trunk
+ https://sourceforge.net/p/supercsv/code/
+
\ No newline at end of file
diff --git a/src/site/apt/cell_processors.apt b/src/site/apt/cell_processors.apt
index 1a373f4f..1e35190e 100644
--- a/src/site/apt/cell_processors.apt
+++ b/src/site/apt/cell_processors.apt
@@ -1,112 +1,112 @@
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-~~ Copyright 2007 Kasper B. Graversen
-~~
-~~ Licensed under the Apache License, Version 2.0 (the "License");
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~ http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an "AS IS" BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License.
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ---------------
- Cell Processors
- ---------------
-
-Cell processors
-
- Cell processors are an integral part of reading and writing with Super CSV - they automate the data type conversions, and enforce constraints.
- They implement the design pattern - each processor has a single, well-defined purpose and can be chained
- together with other processors to fully automate all of the required conversions and constraint validation for a single CSV column.
-
- A typical CellProcessor configuration for reading the following CSV file
-
-+-----------------------------------+
-name,birthDate,weight
-John,25/12/1946,83.5
-Alice,06/08/1958,
-Bob,01/03/1984,65.0,
-+-----------------------------------+
-
- might look like the following:
-
-+---------------------------------------------------------------------------------------------------------------------------------------------+
-public static final CellProcessor[] PROCESSORS = new CellProcessor[] {
- null,
- new ParseDate("dd/MM/yyyy"),
- new Optional(new ParseDouble()) };
-+---------------------------------------------------------------------------------------------------------------------------------------------+
-
- The number of elements in the CellProcessor array must match up with the number of columns to be processed - the file has 3 columns,
- so the CellProcessor array has 3 elements.
-
- [[1]] The first processor (for the name column) is <<>>, which indicates that (the String is used unchanged).
- Semantically, it might have been better to replace that with <<>>, which means the same thing.
- If we wanted to guarantee that name was supplied (i.e. it's mandatory), then we could have used <<>> instead
- (which works because empty String (<<<"">>>) is converted to <<>> when reading).
-
- [[2]] The second processor (for the birthDate column) is <<>>, which indicates that that column is mandatory,
- and should be parsed as a Date using the supplied format.
-
- [[3]] The third processor (for the weight column) is <<>>, which indicates that the column is optional
- (the value will be <<>> if the column is empty), but if it's supplied then parse it as a Double.
-
-* Cell processor overview
-
- * processors are similar to servlet filters in JEE - they can be chained together, and they can modify the data that's passed along the chain
-
- * processors are executed from (but yes, the processor's constructors are invoked from right to left!)
-
- * the number of elements in the CellProcessor array must match up with the number of columns to be processed
-
- * a <<>> processor means
-
- * most processors expect input to be non-null - if it's an optional column then chain an <<>> processor before it, e.g.
- <<>>. Further processing (processors chained after <<>>) will be skipped if the value to be read/written is <<>>.
-
- * all processors throw <<>> if they encounter data they cannot process (this shouldn't normally happen if your processor configuration is correct)
-
- * constraint-validating processors throw <<>> if the value does not satisfy the constraint
-
-* Available cell processors
-
- The examples above just touch the surface of what's possible with cell processors.
- The following table shows all of the processors available for reading, writing, and constraint validation.
-
-*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
- <> || Writing || Reading / Writing || Constraints
-*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
- {{{./apidocs/org/supercsv/cellprocessor/ParseBigDecimal.html}ParseBigDecimal}} | {{{./apidocs/org/supercsv/cellprocessor/FmtBool.html}FmtBool}} | {{{./apidocs/org/supercsv/cellprocessor/ConvertNullTo.html}ConvertNullTo}} | {{{./apidocs/org/supercsv/cellprocessor/constraint/DMinMax.html}DMinMax}}
-*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
- {{{./apidocs/org/supercsv/cellprocessor/ParseBool.html}ParseBool}} | {{{./apidocs/org/supercsv/cellprocessor/FmtDate.html}FmtDate}} | {{{./apidocs/org/supercsv/cellprocessor/HashMapper.html}HashMapper}} | {{{./apidocs/org/supercsv/cellprocessor/constraint/Equals.html}Equals}}
-*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
- {{{./apidocs/org/supercsv/cellprocessor/ParseChar.html}ParseChar}} | {{{./apidocs/org/supercsv/cellprocessor/FmtNumber.html}FmtNumber}} | {{{./apidocs/org/supercsv/cellprocessor/Optional.html}Optional}} | {{{./apidocs/org/supercsv/cellprocessor/constraint/ForbidSubStr.html}ForbidSubStr}}
-*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
- {{{./apidocs/org/supercsv/cellprocessor/ParseDate.html}ParseDate}} | | {{{./apidocs/org/supercsv/cellprocessor/StrReplace.html}StrReplace}} | {{{./apidocs/org/supercsv/cellprocessor/constraint/IsIncludedIn.html}IsIncludedIn}}
-*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
- {{{./apidocs/org/supercsv/cellprocessor/ParseDouble.html}ParseDouble}} | | {{{./apidocs/org/supercsv/cellprocessor/Token.html}Token}} | {{{./apidocs/org/supercsv/cellprocessor/constraint/LMinMax.html}LMinMax}}
-*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
- {{{./apidocs/org/supercsv/cellprocessor/ParseInt.html}ParseInt}} | | {{{./apidocs/org/supercsv/cellprocessor/Trim.html}Trim}} | {{{./apidocs/org/supercsv/cellprocessor/constraint/NotNull.html}NotNull}}
-*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
- {{{./apidocs/org/supercsv/cellprocessor/ParseLong.html}ParseLong}} | | {{{./apidocs/org/supercsv/cellprocessor/Truncate.html}Truncate}} | {{{./apidocs/org/supercsv/cellprocessor/constraint/RequireHashCode.html}RequireHashCode}}
-*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
- | | | {{{./apidocs/org/supercsv/cellprocessor/constraint/RequireSubStr.html}RequireSubStr}}
-*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
- | | | {{{./apidocs/org/supercsv/cellprocessor/constraint/Strlen.html}Strlen}}
-*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
- | | | {{{./apidocs/org/supercsv/cellprocessor/constraint/StrMinMax.html}StrMinMax}}
-*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
- | | | {{{./apidocs/org/supercsv/cellprocessor/constraint/StrNotNullOrEmpty.html}StrNotNullOrEmpty}}
-*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
- | | | {{{./apidocs/org/supercsv/cellprocessor/constraint/StrRegEx.html}StrRegEx}}
-*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
- | | | {{{./apidocs/org/supercsv/cellprocessor/constraint/Unique.html}Unique}}
-*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
- | | | {{{./apidocs/org/supercsv/cellprocessor/constraint/UniqueHashCode.html}UniqueHashCode}}
-*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
-
-
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~ Copyright 2007 Kasper B. Graversen
+~~
+~~ Licensed under the Apache License, Version 2.0 (the "License");
+~~ you may not use this file except in compliance with the License.
+~~ You may obtain a copy of the License at
+~~
+~~ http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ ---------------
+ Cell Processors
+ ---------------
+
+Cell processors
+
+ Cell processors are an integral part of reading and writing with Super CSV - they automate the data type conversions, and enforce constraints.
+ They implement the design pattern - each processor has a single, well-defined purpose and can be chained
+ together with other processors to fully automate all of the required conversions and constraint validation for a single CSV column.
+
+ A typical CellProcessor configuration for reading the following CSV file
+
++-----------------------------------+
+name,birthDate,weight
+John,25/12/1946,83.5
+Alice,06/08/1958,
+Bob,01/03/1984,65.0,
++-----------------------------------+
+
+ might look like the following:
+
++---------------------------------------------------------------------------------------------------------------------------------------------+
+public static final CellProcessor[] PROCESSORS = new CellProcessor[] {
+ null,
+ new ParseDate("dd/MM/yyyy"),
+ new Optional(new ParseDouble()) };
++---------------------------------------------------------------------------------------------------------------------------------------------+
+
+ The number of elements in the CellProcessor array must match up with the number of columns to be processed - the file has 3 columns,
+ so the CellProcessor array has 3 elements.
+
+ [[1]] The first processor (for the name column) is <<>>, which indicates that (the String is used unchanged).
+ Semantically, it might have been better to replace that with <<>>, which means the same thing.
+ If we wanted to guarantee that name was supplied (i.e. it's mandatory), then we could have used <<>> instead
+ (which works because empty String (<<<"">>>) is converted to <<>> when reading).
+
+ [[2]] The second processor (for the birthDate column) is <<>>, which indicates that that column is mandatory,
+ and should be parsed as a Date using the supplied format.
+
+ [[3]] The third processor (for the weight column) is <<>>, which indicates that the column is optional
+ (the value will be <<>> if the column is empty), but if it's supplied then parse it as a Double.
+
+* Cell processor overview
+
+ * processors are similar to servlet filters in JEE - they can be chained together, and they can modify the data that's passed along the chain
+
+ * processors are executed from (but yes, the processor's constructors are invoked from right to left!)
+
+ * the number of elements in the CellProcessor array must match up with the number of columns to be processed
+
+ * a <<>> processor means
+
+ * most processors expect input to be non-null - if it's an optional column then chain an <<>> processor before it, e.g.
+ <<>>. Further processing (processors chained after <<>>) will be skipped if the value to be read/written is <<>>.
+
+ * all processors throw <<>> if they encounter data they cannot process (this shouldn't normally happen if your processor configuration is correct)
+
+ * constraint-validating processors throw <<>> if the value does not satisfy the constraint
+
+* Available cell processors
+
+ The examples above just touch the surface of what's possible with cell processors.
+ The following table shows all of the processors available for reading, writing, and constraint validation.
+
+*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
+ <> || Writing || Reading / Writing || Constraints
+*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
+ {{{./apidocs/org/supercsv/cellprocessor/ParseBigDecimal.html}ParseBigDecimal}} | {{{./apidocs/org/supercsv/cellprocessor/FmtBool.html}FmtBool}} | {{{./apidocs/org/supercsv/cellprocessor/ConvertNullTo.html}ConvertNullTo}} | {{{./apidocs/org/supercsv/cellprocessor/constraint/DMinMax.html}DMinMax}}
+*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
+ {{{./apidocs/org/supercsv/cellprocessor/ParseBool.html}ParseBool}} | {{{./apidocs/org/supercsv/cellprocessor/FmtDate.html}FmtDate}} | {{{./apidocs/org/supercsv/cellprocessor/HashMapper.html}HashMapper}} | {{{./apidocs/org/supercsv/cellprocessor/constraint/Equals.html}Equals}}
+*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
+ {{{./apidocs/org/supercsv/cellprocessor/ParseChar.html}ParseChar}} | {{{./apidocs/org/supercsv/cellprocessor/FmtNumber.html}FmtNumber}} | {{{./apidocs/org/supercsv/cellprocessor/Optional.html}Optional}} | {{{./apidocs/org/supercsv/cellprocessor/constraint/ForbidSubStr.html}ForbidSubStr}}
+*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
+ {{{./apidocs/org/supercsv/cellprocessor/ParseDate.html}ParseDate}} | | {{{./apidocs/org/supercsv/cellprocessor/StrReplace.html}StrReplace}} | {{{./apidocs/org/supercsv/cellprocessor/constraint/IsIncludedIn.html}IsIncludedIn}}
+*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
+ {{{./apidocs/org/supercsv/cellprocessor/ParseDouble.html}ParseDouble}} | | {{{./apidocs/org/supercsv/cellprocessor/Token.html}Token}} | {{{./apidocs/org/supercsv/cellprocessor/constraint/LMinMax.html}LMinMax}}
+*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
+ {{{./apidocs/org/supercsv/cellprocessor/ParseInt.html}ParseInt}} | | {{{./apidocs/org/supercsv/cellprocessor/Trim.html}Trim}} | {{{./apidocs/org/supercsv/cellprocessor/constraint/NotNull.html}NotNull}}
+*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
+ {{{./apidocs/org/supercsv/cellprocessor/ParseLong.html}ParseLong}} | | {{{./apidocs/org/supercsv/cellprocessor/Truncate.html}Truncate}} | {{{./apidocs/org/supercsv/cellprocessor/constraint/RequireHashCode.html}RequireHashCode}}
+*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
+ | | | {{{./apidocs/org/supercsv/cellprocessor/constraint/RequireSubStr.html}RequireSubStr}}
+*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
+ | | | {{{./apidocs/org/supercsv/cellprocessor/constraint/Strlen.html}Strlen}}
+*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
+ | | | {{{./apidocs/org/supercsv/cellprocessor/constraint/StrMinMax.html}StrMinMax}}
+*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
+ | | | {{{./apidocs/org/supercsv/cellprocessor/constraint/StrNotNullOrEmpty.html}StrNotNullOrEmpty}}
+*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
+ | | | {{{./apidocs/org/supercsv/cellprocessor/constraint/StrRegEx.html}StrRegEx}}
+*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
+ | | | {{{./apidocs/org/supercsv/cellprocessor/constraint/Unique.html}Unique}}
+*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
+ | | | {{{./apidocs/org/supercsv/cellprocessor/constraint/UniqueHashCode.html}UniqueHashCode}}
+*----------------------------------------------------------------------------------*---------------------------------------------------------------------*-----------------------------------------------------------------------------*-----------------------------------------------------------------------------------------------*
+
+
diff --git a/src/site/apt/csv_specification.apt b/src/site/apt/csv_specification.apt
index cf6a372c..402379e6 100644
--- a/src/site/apt/csv_specification.apt
+++ b/src/site/apt/csv_specification.apt
@@ -1,138 +1,138 @@
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-~~ Copyright 2007 Kasper B. Graversen
-~~
-~~ Licensed under the Apache License, Version 2.0 (the "License");
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~ http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an "AS IS" BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License.
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ------------
- What is CSV?
- ------------
-
-What is CSV?
-
- The comma-separated values (CSV) format is a widely used text file format often used to exchange data between applications.
- It contains multiple records (one per line), and each field is delimited by a comma.
- {{{http://en.wikipedia.org/wiki/Comma-separated_values}Wikipedia}} has a good explanation of the CSV format and its history.
-
- There is no definitive standard for CSV, however the most commonly accepted definition is {{{http://tools.ietf.org/html/rfc4180}RFC 4180}} -
- the MIME type definition for CSV. Super CSV is 100% compliant with RFC 4180, while still allowing some flexibility where CSV files deviate from the definition.
-
- The following shows each rule defined in RFC 4180, and how it is treated by Super CSV.
-
-* Rule 1
-
------------------------------------------------------------------------------------
-1. Each record is located on a separate line, delimited by a line
- break (CRLF). For example:
-
- aaa,bbb,ccc CRLF
- zzz,yyy,xxx CRLF
------------------------------------------------------------------------------------
-
- Super CSV accepts all line breaks (Windows, Mac or Unix) when reading CSV files,
- and uses the end of line symbols specified by the user (via the {{{./apidocs/org/supercsv/prefs/CsvPreference.html}CsvPreference}} object) when writing CSV files.
-
-* Rule 2
-
------------------------------------------------------------------------------------
-2. The last record in the file may or may not have an ending line
- break. For example:
-
- aaa,bbb,ccc CRLF
- zzz,yyy,xxx
------------------------------------------------------------------------------------
-
- Super CSV add a line break when writing the last line of a CSV file, but a line break on the last line is optional when reading.
-
-* Rule 3
-
------------------------------------------------------------------------------------
-3. There maybe an optional header line appearing as the first line
- of the file with the same format as normal record lines. This
- header will contain names corresponding to the fields in the file
- and should contain the same number of fields as the records in
- the rest of the file (the presence or absence of the header line
- should be indicated via the optional "header" parameter of this
- MIME type). For example:
-
- field_name,field_name,field_name CRLF
- aaa,bbb,ccc CRLF
- zzz,yyy,xxx CRLF
------------------------------------------------------------------------------------
-
- Super CSV provides methods for reading and writing headers, if required.
- It also makes use of the header for mapping between CSV and POJOs (see {{{./apidocs/org/supercsv/io/CsvBeanReader.html}CsvBeanReader}}/{{{./apidocs/org/supercsv/io/CsvBeanWriter.html}CsvBeanWriter}}).
-
-* Rule 4
-
------------------------------------------------------------------------------------
-4. Within the header and each record, there may be one or more
- fields, separated by commas. Each line should contain the same
- number of fields throughout the file. Spaces are considered part
- of a field and should not be ignored. The last field in the
- record must not be followed by a comma. For example:
-
- aaa,bbb,ccc
------------------------------------------------------------------------------------
-
- The delimiter in Super CSV is configurable via the {{{./apidocs/org/supercsv/prefs/CsvPreference.html}CsvPreference}} object, though it is typically a comma.
-
- Super CSV expects each line to contain the same number of fields (including the header).
- In cases where the number of fields varies, {{{./apidocs/org/supercsv/io/CsvListReader.html}CsvListReader}}/{{{./apidocs/org/supercsv/io/CsvListWriter.html}CsvListWriter}} should be used, as they contain methods for reading/writing lines of arbitrary length.
-
- By default, Super CSV considers spaces part of a field. However, if you require that surrounding spaces should not be part of the field
- (unless within double quotes), then you can enable in your {{{./apidocs/org/supercsv/prefs/CsvPreference.html}CsvPreference}} object. This will ensure that surrounding spaces are trimmed when reading
- (if not within double quotes), and that quotes are applied to a field with surrounding spaces when writing.
-
-* Rule 5
-
------------------------------------------------------------------------------------
-5. Each field may or may not be enclosed in double quotes (however
- some programs, such as Microsoft Excel, do not use double quotes
- at all). If fields are not enclosed with double quotes, then
- double quotes may not appear inside the fields. For example:
-
- "aaa","bbb","ccc" CRLF
- zzz,yyy,xxx
------------------------------------------------------------------------------------
-
- Super CSV only encloses fields in double quotes when they require escaping (see Rule 6).
-
- The quote character is configurable via the {{{./apidocs/org/supercsv/prefs/CsvPreference.html}CsvPreference}} object, though is typically a double quote (<<<">>>).
-
-* Rule 6
-
------------------------------------------------------------------------------------
-6. Fields containing line breaks (CRLF), double quotes, and commas
- should be enclosed in double-quotes. For example:
-
- "aaa","b CRLF
- bb","ccc" CRLF
- zzz,yyy,xxx
------------------------------------------------------------------------------------
-
- Super CSV handles multi-line fields (as long as they're enclosed in quotes) when reading,
- and encloses a field in quotes when writing if it contains a newline, quote character or delimiter (defined in the {{{./apidocs/org/supercsv/prefs/CsvPreference.html}CsvPreference}} object).
-
-* Rule 7
-
------------------------------------------------------------------------------------
-7. If double-quotes are used to enclose fields, then a double-quote
- appearing inside a field must be escaped by preceding it with
- another double quote. For example:
-
- "aaa","b""bb","ccc"
------------------------------------------------------------------------------------
-
- Super CSV escapes double-quotes with a preceding double-quote. Please note that the sometimes-used convention of escaping double-quotes as <<<\">>> (instead of <<<"">>>)
- is <>.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~ Copyright 2007 Kasper B. Graversen
+~~
+~~ Licensed under the Apache License, Version 2.0 (the "License");
+~~ you may not use this file except in compliance with the License.
+~~ You may obtain a copy of the License at
+~~
+~~ http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ ------------
+ What is CSV?
+ ------------
+
+What is CSV?
+
+ The comma-separated values (CSV) format is a widely used text file format often used to exchange data between applications.
+ It contains multiple records (one per line), and each field is delimited by a comma.
+ {{{http://en.wikipedia.org/wiki/Comma-separated_values}Wikipedia}} has a good explanation of the CSV format and its history.
+
+ There is no definitive standard for CSV, however the most commonly accepted definition is {{{http://tools.ietf.org/html/rfc4180}RFC 4180}} -
+ the MIME type definition for CSV. Super CSV is 100% compliant with RFC 4180, while still allowing some flexibility where CSV files deviate from the definition.
+
+ The following shows each rule defined in RFC 4180, and how it is treated by Super CSV.
+
+* Rule 1
+
+-----------------------------------------------------------------------------------
+1. Each record is located on a separate line, delimited by a line
+ break (CRLF). For example:
+
+ aaa,bbb,ccc CRLF
+ zzz,yyy,xxx CRLF
+-----------------------------------------------------------------------------------
+
+ Super CSV accepts all line breaks (Windows, Mac or Unix) when reading CSV files,
+ and uses the end of line symbols specified by the user (via the {{{./apidocs/org/supercsv/prefs/CsvPreference.html}CsvPreference}} object) when writing CSV files.
+
+* Rule 2
+
+-----------------------------------------------------------------------------------
+2. The last record in the file may or may not have an ending line
+ break. For example:
+
+ aaa,bbb,ccc CRLF
+ zzz,yyy,xxx
+-----------------------------------------------------------------------------------
+
+ Super CSV adds a line break when writing the last line of a CSV file, but a line break on the last line is optional when reading.
+
+* Rule 3
+
+-----------------------------------------------------------------------------------
+3. There maybe an optional header line appearing as the first line
+ of the file with the same format as normal record lines. This
+ header will contain names corresponding to the fields in the file
+ and should contain the same number of fields as the records in
+ the rest of the file (the presence or absence of the header line
+ should be indicated via the optional "header" parameter of this
+ MIME type). For example:
+
+ field_name,field_name,field_name CRLF
+ aaa,bbb,ccc CRLF
+ zzz,yyy,xxx CRLF
+-----------------------------------------------------------------------------------
+
+ Super CSV provides methods for reading and writing headers, if required.
+ It also makes use of the header for mapping between CSV and POJOs (see {{{./apidocs/org/supercsv/io/CsvBeanReader.html}CsvBeanReader}}/{{{./apidocs/org/supercsv/io/CsvBeanWriter.html}CsvBeanWriter}}).
+
+* Rule 4
+
+-----------------------------------------------------------------------------------
+4. Within the header and each record, there may be one or more
+ fields, separated by commas. Each line should contain the same
+ number of fields throughout the file. Spaces are considered part
+ of a field and should not be ignored. The last field in the
+ record must not be followed by a comma. For example:
+
+ aaa,bbb,ccc
+-----------------------------------------------------------------------------------
+
+ The delimiter in Super CSV is configurable via the {{{./apidocs/org/supercsv/prefs/CsvPreference.html}CsvPreference}} object, though it is typically a comma.
+
+ Super CSV expects each line to contain the same number of fields (including the header).
+ In cases where the number of fields varies, {{{./apidocs/org/supercsv/io/CsvListReader.html}CsvListReader}}/{{{./apidocs/org/supercsv/io/CsvListWriter.html}CsvListWriter}} should be used, as they contain methods for reading/writing lines of arbitrary length.
+
+ By default, Super CSV considers spaces part of a field. However, if you require that surrounding spaces should not be part of the field
+ (unless within double quotes), then you can enable this in your {{{./apidocs/org/supercsv/prefs/CsvPreference.html}CsvPreference}} object. This will ensure that surrounding spaces are trimmed when reading
+ (if not within double quotes), and that quotes are applied to a field with surrounding spaces when writing.
+
+* Rule 5
+
+-----------------------------------------------------------------------------------
+5. Each field may or may not be enclosed in double quotes (however
+ some programs, such as Microsoft Excel, do not use double quotes
+ at all). If fields are not enclosed with double quotes, then
+ double quotes may not appear inside the fields. For example:
+
+ "aaa","bbb","ccc" CRLF
+ zzz,yyy,xxx
+-----------------------------------------------------------------------------------
+
+ Super CSV only encloses fields in double quotes when they require escaping (see Rule 6).
+
+ The quote character is configurable via the {{{./apidocs/org/supercsv/prefs/CsvPreference.html}CsvPreference}} object, though it is typically a double quote (<<<">>>).
+
+* Rule 6
+
+-----------------------------------------------------------------------------------
+6. Fields containing line breaks (CRLF), double quotes, and commas
+ should be enclosed in double-quotes. For example:
+
+ "aaa","b CRLF
+ bb","ccc" CRLF
+ zzz,yyy,xxx
+-----------------------------------------------------------------------------------
+
+ Super CSV handles multi-line fields (as long as they're enclosed in quotes) when reading,
+ and encloses a field in quotes when writing if it contains a newline, quote character or delimiter (defined in the {{{./apidocs/org/supercsv/prefs/CsvPreference.html}CsvPreference}} object).
+
+* Rule 7
+
+-----------------------------------------------------------------------------------
+7. If double-quotes are used to enclose fields, then a double-quote
+ appearing inside a field must be escaped by preceding it with
+ another double quote. For example:
+
+ "aaa","b""bb","ccc"
+-----------------------------------------------------------------------------------
+
+ Super CSV escapes double-quotes with a preceding double-quote. Please note that the sometimes-used convention of escaping double-quotes as <<<\">>> (instead of <<<"">>>)
+ is <not> supported.
\ No newline at end of file
diff --git a/src/site/apt/downloading.apt.vm b/src/site/apt/downloading.apt.vm
index 56655ed0..242b06ff 100644
--- a/src/site/apt/downloading.apt.vm
+++ b/src/site/apt/downloading.apt.vm
@@ -1,71 +1,71 @@
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-~~ Copyright 2007 Kasper B. Graversen
-~~
-~~ Licensed under the Apache License, Version 2.0 (the "License");
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~ http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an "AS IS" BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License.
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ---------------------
- Downloading Super CSV
- ---------------------
-
-Downloading Super CSV
-
-* Prerequisites
-
- [Java 1.5+] Super CSV is compiled for Java 1.5
-
-* Maven users
-
- If you are using Maven, simply copy the following dependency into your pom.xml file.
- The artifact is hosted at {{{http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22net.sf.supercsv%22}Maven Central}}, and is standalone (no dependencies).
-
-+-------------------------------------+
-
- ${project.groupId}
- super-csv
- ${project.version}
-
-+-------------------------------------+
-
- If you wish to use the new Dozer extension (with deep mapping and index-based mapping support),
- then you will also need the following:
-
-+-------------------------------------+
-
- ${project.groupId}
- super-csv-dozer
- ${project.version}
-
-+-------------------------------------+
-
-* Everyone else
-
- You can download the latest distribution zip file from {{{https://sourceforge.net/projects/supercsv/files/latest/download?source=files}SourceForge}},
- which contains:
-
-*----------------------------------------------------------------+---------------------------------------------------------------------+
- <> || Description
-*----------------------------------------------------------------+---------------------------------------------------------------------+
- super-csv/super-csv-${project.version}.jar | Super CSV ${project.version} (compiled classes only)
-*----------------------------------------------------------------+---------------------------------------------------------------------+
- super-csv/super-csv-${project.version}-sources.jar | The Super CSV source code
-*----------------------------------------------------------------+---------------------------------------------------------------------+
- super-csv/super-csv-${project.version}-javadoc.jar | The Super CSV Javadoc documentation
-*----------------------------------------------------------------+---------------------------------------------------------------------+
- super-csv-dozer/super-csv-dozer-${project.version}.jar | Super CSV Dozer extension ${project.version} (compiled classes only)
-*----------------------------------------------------------------+---------------------------------------------------------------------+
- super-csv-dozer/super-csv-dozer-${project.version}-sources.jar | The Super CSV Dozer extension source code
-*----------------------------------------------------------------+---------------------------------------------------------------------+
- super-csv-dozer/super-csv-dozer-${project.version}-javadoc.jar | The Super CSV Dozer extension Javadoc documentation
-*----------------------------------------------------------------+---------------------------------------------------------------------+
- super-csv-dozer/lib | The Super CSV Dozer extension's dependencies (including Dozer)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~ Copyright 2007 Kasper B. Graversen
+~~
+~~ Licensed under the Apache License, Version 2.0 (the "License");
+~~ you may not use this file except in compliance with the License.
+~~ You may obtain a copy of the License at
+~~
+~~ http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ ---------------------
+ Downloading Super CSV
+ ---------------------
+
+Downloading Super CSV
+
+* Prerequisites
+
+ [Java 1.5+] Super CSV is compiled for Java 1.5
+
+* Maven users
+
+ If you are using Maven, simply copy the following dependency into your pom.xml file.
+ The artifact is hosted at {{{http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22net.sf.supercsv%22}Maven Central}}, and is standalone (no dependencies).
+
++-------------------------------------+
+
+ ${project.groupId}
+ super-csv
+ ${project.version}
+
++-------------------------------------+
+
+ If you wish to use the new Dozer extension (with deep mapping and index-based mapping support),
+ then you will also need the following:
+
++-------------------------------------+
+
+ ${project.groupId}
+ super-csv-dozer
+ ${project.version}
+
++-------------------------------------+
+
+* Everyone else
+
+ You can download the latest distribution zip file from {{{https://sourceforge.net/projects/supercsv/files/latest/download?source=files}SourceForge}},
+ which contains:
+
+*----------------------------------------------------------------+---------------------------------------------------------------------+
+ <> || Description
+*----------------------------------------------------------------+---------------------------------------------------------------------+
+ super-csv/super-csv-${project.version}.jar | Super CSV ${project.version} (compiled classes only)
+*----------------------------------------------------------------+---------------------------------------------------------------------+
+ super-csv/super-csv-${project.version}-sources.jar | The Super CSV source code
+*----------------------------------------------------------------+---------------------------------------------------------------------+
+ super-csv/super-csv-${project.version}-javadoc.jar | The Super CSV Javadoc documentation
+*----------------------------------------------------------------+---------------------------------------------------------------------+
+ super-csv-dozer/super-csv-dozer-${project.version}.jar | Super CSV Dozer extension ${project.version} (compiled classes only)
+*----------------------------------------------------------------+---------------------------------------------------------------------+
+ super-csv-dozer/super-csv-dozer-${project.version}-sources.jar | The Super CSV Dozer extension source code
+*----------------------------------------------------------------+---------------------------------------------------------------------+
+ super-csv-dozer/super-csv-dozer-${project.version}-javadoc.jar | The Super CSV Dozer extension Javadoc documentation
+*----------------------------------------------------------------+---------------------------------------------------------------------+
+ super-csv-dozer/lib | The Super CSV Dozer extension's dependencies (including Dozer)
*----------------------------------------------------------------+---------------------------------------------------------------------+
\ No newline at end of file
diff --git a/src/site/apt/dozer.apt b/src/site/apt/dozer.apt
index 93674e2d..6b91440b 100644
--- a/src/site/apt/dozer.apt
+++ b/src/site/apt/dozer.apt
@@ -1,165 +1,165 @@
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-~~ Copyright 2007 Kasper B. Graversen
-~~
-~~ Licensed under the Apache License, Version 2.0 (the "License");
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~ http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an "AS IS" BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License.
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- -------------------------
- Super CSV Dozer extension
- -------------------------
-
-Super CSV Dozer extension
-
- The {{{./super-csv-dozer/index.html}Super CSV Dozer extension}} integrates Super CSV with {{{http://dozer.sourceforge.net}Dozer}},
- a powerful Javabean mapping library.
- Typically, Dozer requires lots of XML configuration but the addition of
- {{{http://dozer.sourceforge.net/documentation/apimappings.html}API mapping}} allows Super CSV to set up Dozer mappings dynamically.
-
- The use of Dozer allows {{{./apidocs/org/supercsv/io/dozer/CsvDozerBeanReader.html}CsvDozerBeanReader}} and
- {{{./apidocs/org/supercsv/io/dozer/CsvDozerBeanWriter.html}CsvDozerBeanWriter}} to map simple fields
- (the same as {{{./apidocs/org/supercsv/io/CsvBeanReader.html}CsvBeanReader}} and
- {{{./apidocs/org/supercsv/io/CsvBeanWriter.html}CsvBeanWriter}}),
- but to also perform deep mapping and index-based mapping as well!
-
- Check out the {{{./examples_dozer.html}examples}}, or read on for more information.
-
-* Deep mapping
-
- {{{http://dozer.sourceforge.net/documentation/deepmapping.html}Deep mapping}} allows you to make use of the relationships
- between your classes.
-
- For example, if your class had an <<>> field, you could utilize deep mapping as follows
- (assuming there are valid getters/setters defined for <<>>, <<>> and <<>> in the 3 involved classes):
-
-------------------------
-address.city.name
-------------------------
-
-* Indexed-based mapping
-
- {{{http://dozer.sourceforge.net/documentation/indexmapping.html}Index-based mapping}} allows you to access elements of arrays and
- Collections by their index.
-
- For example, if your class had a collection of Addresses, you could utilize index-based mapping
- to access the first one as follows:
-
-------------------------
-addresses[0]
-------------------------
-
- You can even combine index-based mapping with deep mapping:
-
-------------------------
-addresses[0].city.name
-------------------------
-
-* Logging
-
- Dozer uses {{{http://www.slf4j.org}SLF4J}} for logging. By default it will use a no-operation implementation (i.e. no logging),
- but you can use any of the supported implementations (logback, log4j, slf4j-simple) by placing the appropriate binding jar on the classpath.
-
- See the {{{http://www.slf4j.org/manual.html}SLF4J manual}} for more details.
-
-* Reference Mapping XML Configuration
-
- Most of the time you'll want to let Super CSV take care of the dozer configuration by simply calling the <<>> method.
- However, you might want to make use of the advanced features of Dozer (such as custom converters, bean factories, etc).
- In this case, you can supply Super CSV with a pre-configured DozerBeanMapper.
-
- The following XML is provided as a reference - it's the XML configuration used in the project's unit tests.
- The <<>> class is used internally as the input/output of any Dozer mapping (each indexed column represents a column of CSV).
- At a minimum, you should replace the <<>> with the class you're mapping,
- and update the field mappings as appropriate (but try not to change the XML attributes, as they're important!).
-
-+---------------------------------------------------------------------------------------------------+
-
-
-
-
- org.supercsv.io.dozer.CsvDozerBeanData
- org.supercsv.mock.dozer.SurveyResponse
-
- columns[0]
- age
-
-
- columns[1]
- consentGiven
-
-
- columns[2]
- answers[0].questionNo
-
-
- columns[3]
- answers[0].answer
-
-
- columns[4]
- answers[1].questionNo
-
-
- columns[5]
- answers[1].answer
-
-
- columns[6]
- answers[2].questionNo
-
-
- columns[7]
- answers[2].answer
-
-
-
-
-
- org.supercsv.mock.dozer.SurveyResponse
- org.supercsv.io.dozer.CsvDozerBeanData
-
- age
- columns[0]
-
-
- consentGiven
- columns[1]
-
-
- answers[0].questionNo
- columns[2]
-
-
- answers[0].answer
- columns[3]
-
-
- answers[1].questionNo
- columns[4]
-
-
- answers[1].answer
- columns[5]
-
-
- answers[2].questionNo
- columns[6]
-
-
- answers[2].answer
- columns[7]
-
-
-
-
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~ Copyright 2007 Kasper B. Graversen
+~~
+~~ Licensed under the Apache License, Version 2.0 (the "License");
+~~ you may not use this file except in compliance with the License.
+~~ You may obtain a copy of the License at
+~~
+~~ http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ -------------------------
+ Super CSV Dozer extension
+ -------------------------
+
+Super CSV Dozer extension
+
+ The {{{./super-csv-dozer/index.html}Super CSV Dozer extension}} integrates Super CSV with {{{http://dozer.sourceforge.net}Dozer}},
+ a powerful Javabean mapping library.
+ Typically, Dozer requires lots of XML configuration but the addition of
+ {{{http://dozer.sourceforge.net/documentation/apimappings.html}API mapping}} allows Super CSV to set up Dozer mappings dynamically.
+
+ The use of Dozer allows {{{./apidocs/org/supercsv/io/dozer/CsvDozerBeanReader.html}CsvDozerBeanReader}} and
+ {{{./apidocs/org/supercsv/io/dozer/CsvDozerBeanWriter.html}CsvDozerBeanWriter}} to map simple fields
+ (the same as {{{./apidocs/org/supercsv/io/CsvBeanReader.html}CsvBeanReader}} and
+ {{{./apidocs/org/supercsv/io/CsvBeanWriter.html}CsvBeanWriter}}),
+ but to also perform deep mapping and index-based mapping as well!
+
+ Check out the {{{./examples_dozer.html}examples}}, or read on for more information.
+
+* Deep mapping
+
+ {{{http://dozer.sourceforge.net/documentation/deepmapping.html}Deep mapping}} allows you to make use of the relationships
+ between your classes.
+
+ For example, if your class had an <<>> field, you could utilize deep mapping as follows
+ (assuming there are valid getters/setters defined for <<>>, <<>> and <<>> in the 3 involved classes):
+
+------------------------
+address.city.name
+------------------------
+
+* Index-based mapping
+
+ {{{http://dozer.sourceforge.net/documentation/indexmapping.html}Index-based mapping}} allows you to access elements of arrays and
+ Collections by their index.
+
+ For example, if your class had a collection of Addresses, you could utilize index-based mapping
+ to access the first one as follows:
+
+------------------------
+addresses[0]
+------------------------
+
+ You can even combine index-based mapping with deep mapping:
+
+------------------------
+addresses[0].city.name
+------------------------
+
+* Logging
+
+ Dozer uses {{{http://www.slf4j.org}SLF4J}} for logging. By default it will use a no-operation implementation (i.e. no logging),
+ but you can use any of the supported implementations (logback, log4j, slf4j-simple) by placing the appropriate binding jar on the classpath.
+
+ See the {{{http://www.slf4j.org/manual.html}SLF4J manual}} for more details.
+
+* Reference Mapping XML Configuration
+
+ Most of the time you'll want to let Super CSV take care of the Dozer configuration by simply calling the <<>> method.
+ However, you might want to make use of the advanced features of Dozer (such as custom converters, bean factories, etc).
+ In this case, you can supply Super CSV with a pre-configured DozerBeanMapper.
+
+ The following XML is provided as a reference - it's the XML configuration used in the project's unit tests.
+ The <<>> class is used internally as the input/output of any Dozer mapping (each indexed column represents a column of CSV).
+ At a minimum, you should replace the <<>> with the class you're mapping,
+ and update the field mappings as appropriate (but try not to change the XML attributes, as they're important!).
+
++---------------------------------------------------------------------------------------------------+
+
+
+
+
+ org.supercsv.io.dozer.CsvDozerBeanData
+ org.supercsv.mock.dozer.SurveyResponse
+
+ columns[0]
+ age
+
+
+ columns[1]
+ consentGiven
+
+
+ columns[2]
+ answers[0].questionNo
+
+
+ columns[3]
+ answers[0].answer
+
+
+ columns[4]
+ answers[1].questionNo
+
+
+ columns[5]
+ answers[1].answer
+
+
+ columns[6]
+ answers[2].questionNo
+
+
+ columns[7]
+ answers[2].answer
+
+
+
+
+
+ org.supercsv.mock.dozer.SurveyResponse
+ org.supercsv.io.dozer.CsvDozerBeanData
+
+ age
+ columns[0]
+
+
+ consentGiven
+ columns[1]
+
+
+ answers[0].questionNo
+ columns[2]
+
+
+ answers[0].answer
+ columns[3]
+
+
+ answers[1].questionNo
+ columns[4]
+
+
+ answers[1].answer
+ columns[5]
+
+
+ answers[2].questionNo
+ columns[6]
+
+
+ answers[2].answer
+ columns[7]
+
+
+
+
+---------------------------------------------------------------------------------------------------+
\ No newline at end of file
diff --git a/src/site/apt/examples_dozer.apt b/src/site/apt/examples_dozer.apt
index a7c0f159..593664e4 100644
--- a/src/site/apt/examples_dozer.apt
+++ b/src/site/apt/examples_dozer.apt
@@ -1,189 +1,189 @@
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-~~ Copyright 2007 Kasper B. Graversen
-~~
-~~ Licensed under the Apache License, Version 2.0 (the "License");
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~ http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an "AS IS" BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License.
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ----------------------------------------
- Reading and writing CSV files with Dozer
- ----------------------------------------
-
-Reading and writing CSV files with Dozer
-
- This page contains some examples of reading and writing CSV files using Super CSV and Dozer.
- For a closer look, refer to the {{{./xref-test/org/supercsv/example/dozer/Reading.html}reading}} and
- {{{./xref-test/org/supercsv/example/dozer/Writing.html}writing}} example source.
-
- If you haven't already, check out the {{{./dozer.html}background}} on the Super CSV Dozer extension.
-
-* Example CSV file
-
- Here is an example CSV file that represents responses to a survey (we'll be using this in the following reading examples).
- It has a header and 3 rows of data, all with 8 columns.
-
----------------------------------------------------------------------------------------------------------------
-age,consentGiven,questionNo1,answer1,questionNo2,answer2,questionNo3,answer3
-18,Y,1,Twelve,2,Albert Einstein,3,Big Bang Theory
-,Y,1,Thirteen,2,Nikola Tesla,3,Stargate
-42,N,1,,2,Carl Sagan,3,Star Wars
----------------------------------------------------------------------------------------------------------------
-
-* Reading with CsvDozerBeanReader
-
- {{{./apidocs/org/supercsv/io/dozer/CsvDozerBeanReader.html}CsvDozerBeanReader}} is the most powerful CSV reader.
- The example reads each row from the example CSV file into a {{{./xref-test/org/supercsv/mock/dozer/SurveyResponse.html}SurveyResponse}} bean
- , which has a Collection of {{{./xref-test/org/supercsv/mock/dozer/Answer.html}Answer}}s.
-
- To do this requires the following field mapping (notice that the first two mappings are the same as you'd have for
- CsvBeanReader, but the rest use indexed and deep mapping).
-
-+-------------------------------------------------------------------------------------------------------------+
-private static final String[] FIELD_MAPPING = new String[] {
- "age", // simple field mapping (like CsvBeanReader)
- "consentGiven", // as above
- "answers[0].questionNo", // indexed (first element) + deep mapping
- "answers[0].answer",
- "answers[1].questionNo", // indexed (second element) + deep mapping
- "answers[1].answer",
- "answers[2].questionNo",
- "answers[2].answer" };
-+-------------------------------------------------------------------------------------------------------------+
-
- If you are familiar with the standard CsvBeanReader, you'll notice that using CsvDozerBeanReader is very similar.
- The main difference is that CsvDozerBeanReader requires you to configure it (with the <<>> method)
- prior to reading. You can still use the result of <<>> as your field mapping, but you'll have to supply
- your own if you want to use deep mapping or index-based mapping.
-
-+-------------------------------------------------------------------------------------------------------------+
-/**
- * An example of reading using CsvDozerBeanReader.
- */
-private static void readWithCsvDozerBeanReader() throws Exception {
-
- final CellProcessor[] processors = new CellProcessor[] {
- new Optional(new ParseInt()), // age
- new ParseBool(), // consent
- new ParseInt(), // questionNo 1
- new Optional(), // answer 1
- new ParseInt(), // questionNo 2
- new Optional(), // answer 2
- new ParseInt(), // questionNo 3
- new Optional() // answer 3
- };
-
- ICsvDozerBeanReader beanReader = null;
- try {
- beanReader = new CsvDozerBeanReader(new FileReader(CSV_FILENAME), CsvPreference.STANDARD_PREFERENCE);
-
- beanReader.getHeader(true); // ignore the header
- beanReader.configureBeanMapping(SurveyResponse.class, FIELD_MAPPING);
-
- SurveyResponse surveyResponse;
- while( (surveyResponse = beanReader.read(SurveyResponse.class, processors)) != null ) {
- System.out.println(String.format("lineNo=%s, rowNo=%s, surveyResponse=%s", beanReader.getLineNumber(),
- beanReader.getRowNumber(), surveyResponse));
- }
-
- }
- finally {
- if( beanReader != null ) {
- beanReader.close();
- }
- }
-}
-+-------------------------------------------------------------------------------------------------------------+
-
- Output:
-
----------------------------------------------------------------------------------------------------------------
-lineNo=2, rowNo=2, surveyResponse=SurveyResponse [age=18, consentGiven=true, answers=[Answer [questionNo=1, answer=Twelve], Answer [questionNo=2, answer=Albert Einstein], Answer [questionNo=3, answer=Big Bang Theory]]]
-lineNo=3, rowNo=3, surveyResponse=SurveyResponse [age=null, consentGiven=true, answers=[Answer [questionNo=1, answer=Thirteen], Answer [questionNo=2, answer=Nikola Tesla], Answer [questionNo=3, answer=Stargate]]]
-lineNo=4, rowNo=4, surveyResponse=SurveyResponse [age=42, consentGiven=false, answers=[Answer [questionNo=1, answer=null], Answer [questionNo=2, answer=Carl Sagan], Answer [questionNo=3, answer=Star Wars]]]
----------------------------------------------------------------------------------------------------------------
-
-* Partial reading with CsvDozerBeanReader
-
- Partial reading with CsvDozerBeanReader is virtually identical to CsvBeanReader.
- See the partial reading example in the {{{./xref-test/org/supercsv/example/dozer/Reading.html}reading example source}}.
-
-* Writing with CsvDozerBeanWriter
-
- {{{./apidocs/org/supercsv/io/dozer/CsvDozerBeanWriter.html}CsvDozerBeanWriter}} is the most powerful CSV writer.
- The example writes each CSV row from a {{{./xref-test/org/supercsv/mock/dozer/SurveyResponse.html}SurveyResponse}} bean
- , which has a Collection of {{{./xref-test/org/supercsv/mock/dozer/Answer.html}Answer}}s.
-
- It uses exactly the same field mapping as the reading example above, and once again you'll notice that CsvDozerBeanWriter
- requires you to configure it (with the <<>> method) prior to writing.
-
-+-------------------------------------------------------------------------------------------------------------+
-/**
- * An example of writing using CsvDozerBeanWriter.
- */
-private static void writeWithDozerCsvBeanWriter() throws Exception {
-
- final CellProcessor[] processors = new CellProcessor[] {
- new Optional(), // age
- new FmtBool("Y", "N"), // consent
- new NotNull(), // questionNo 1
- new Optional(), // answer 1
- new NotNull(), // questionNo 2
- new Optional(), // answer 2
- new NotNull(), // questionNo 3
- new Optional() }; // answer 4
-
- // create the survey responses to write
- SurveyResponse response1 = new SurveyResponse(18, true, Arrays.asList(new Answer(1, "Twelve"), new Answer(2,
- "Albert Einstein"), new Answer(3, "Big Bang Theory")));
- SurveyResponse response2 = new SurveyResponse(null, true, Arrays.asList(new Answer(1, "Thirteen"), new Answer(2,
- "Nikola Tesla"), new Answer(3, "Stargate")));
- SurveyResponse response3 = new SurveyResponse(42, false, Arrays.asList(new Answer(1, null), new Answer(2,
- "Carl Sagan"), new Answer(3, "Star Wars")));
- final List surveyResponses = Arrays.asList(response1, response2, response3);
-
- ICsvDozerBeanWriter beanWriter = null;
- try {
- beanWriter = new CsvDozerBeanWriter(new FileWriter("target/writeWithCsvDozerBeanWriter.csv"),
- CsvPreference.STANDARD_PREFERENCE);
-
- // configure the mapping from the fields to the CSV columns
- beanWriter.configureBeanMapping(SurveyResponse.class, FIELD_MAPPING);
-
- // write the header
- beanWriter.writeHeader("age", "consentGiven", "questionNo1", "answer1", "questionNo2", "answer2",
- "questionNo3", "answer3");
-
- // write the beans
- for( final SurveyResponse surveyResponse : surveyResponses ) {
- beanWriter.write(surveyResponse, processors);
- }
-
- }
- finally {
- if( beanWriter != null ) {
- beanWriter.close();
- }
- }
-}
-+-------------------------------------------------------------------------------------------------------------+
-
- Output:
-
----------------------------------------------------------------------------------------------------------------
-lineNo=2, rowNo=2, surveyResponse=SurveyResponse [age=18, consentGiven=true, answers=[Answer [questionNo=1, answer=Twelve], Answer [questionNo=2, answer=Albert Einstein], Answer [questionNo=3, answer=Big Bang Theory]]]
-lineNo=3, rowNo=3, surveyResponse=SurveyResponse [age=null, consentGiven=true, answers=[Answer [questionNo=1, answer=Thirteen], Answer [questionNo=2, answer=Nikola Tesla], Answer [questionNo=3, answer=Stargate]]]
-lineNo=4, rowNo=4, surveyResponse=SurveyResponse [age=42, consentGiven=false, answers=[Answer [questionNo=1, answer=null], Answer [questionNo=2, answer=Carl Sagan], Answer [questionNo=3, answer=Star Wars]]]
----------------------------------------------------------------------------------------------------------------
-
-* Partial writing with CsvDozerBeanWriter
-
- Partial writing with CsvDozerBeanWriter is virtually identical to CsvBeanWriter.
- See the partial writing example in the {{{./xref-test/org/supercsv/example/dozer/Writing.html}writing example source}}.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~ Copyright 2007 Kasper B. Graversen
+~~
+~~ Licensed under the Apache License, Version 2.0 (the "License");
+~~ you may not use this file except in compliance with the License.
+~~ You may obtain a copy of the License at
+~~
+~~ http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ ----------------------------------------
+ Reading and writing CSV files with Dozer
+ ----------------------------------------
+
+Reading and writing CSV files with Dozer
+
+ This page contains some examples of reading and writing CSV files using Super CSV and Dozer.
+ For a closer look, refer to the {{{./xref-test/org/supercsv/example/dozer/Reading.html}reading}} and
+ {{{./xref-test/org/supercsv/example/dozer/Writing.html}writing}} example source.
+
+ If you haven't already, check out the {{{./dozer.html}background}} on the Super CSV Dozer extension.
+
+* Example CSV file
+
+ Here is an example CSV file that represents responses to a survey (we'll be using this in the following reading examples).
+ It has a header and 3 rows of data, all with 8 columns.
+
+---------------------------------------------------------------------------------------------------------------
+age,consentGiven,questionNo1,answer1,questionNo2,answer2,questionNo3,answer3
+18,Y,1,Twelve,2,Albert Einstein,3,Big Bang Theory
+,Y,1,Thirteen,2,Nikola Tesla,3,Stargate
+42,N,1,,2,Carl Sagan,3,Star Wars
+---------------------------------------------------------------------------------------------------------------
+
+* Reading with CsvDozerBeanReader
+
+ {{{./apidocs/org/supercsv/io/dozer/CsvDozerBeanReader.html}CsvDozerBeanReader}} is the most powerful CSV reader.
+ The example reads each row from the example CSV file into a {{{./xref-test/org/supercsv/mock/dozer/SurveyResponse.html}SurveyResponse}} bean
+ , which has a Collection of {{{./xref-test/org/supercsv/mock/dozer/Answer.html}Answer}}s.
+
+ To do this requires the following field mapping (notice that the first two mappings are the same as you'd have for
+ CsvBeanReader, but the rest use indexed and deep mapping).
+
++-------------------------------------------------------------------------------------------------------------+
+private static final String[] FIELD_MAPPING = new String[] {
+ "age", // simple field mapping (like CsvBeanReader)
+ "consentGiven", // as above
+ "answers[0].questionNo", // indexed (first element) + deep mapping
+ "answers[0].answer",
+ "answers[1].questionNo", // indexed (second element) + deep mapping
+ "answers[1].answer",
+ "answers[2].questionNo",
+ "answers[2].answer" };
++-------------------------------------------------------------------------------------------------------------+
+
+ If you are familiar with the standard CsvBeanReader, you'll notice that using CsvDozerBeanReader is very similar.
+ The main difference is that CsvDozerBeanReader requires you to configure it (with the <<configureBeanMapping()>> method)
+ prior to reading. You can still use the result of <<getHeader(true)>> as your field mapping, but you'll have to supply
+ your own if you want to use deep mapping or index-based mapping.
+
++-------------------------------------------------------------------------------------------------------------+
+/**
+ * An example of reading using CsvDozerBeanReader.
+ */
+private static void readWithCsvDozerBeanReader() throws Exception {
+
+ final CellProcessor[] processors = new CellProcessor[] {
+ new Optional(new ParseInt()), // age
+ new ParseBool(), // consent
+ new ParseInt(), // questionNo 1
+ new Optional(), // answer 1
+ new ParseInt(), // questionNo 2
+ new Optional(), // answer 2
+ new ParseInt(), // questionNo 3
+ new Optional() // answer 3
+ };
+
+ ICsvDozerBeanReader beanReader = null;
+ try {
+ beanReader = new CsvDozerBeanReader(new FileReader(CSV_FILENAME), CsvPreference.STANDARD_PREFERENCE);
+
+ beanReader.getHeader(true); // ignore the header
+ beanReader.configureBeanMapping(SurveyResponse.class, FIELD_MAPPING);
+
+ SurveyResponse surveyResponse;
+ while( (surveyResponse = beanReader.read(SurveyResponse.class, processors)) != null ) {
+ System.out.println(String.format("lineNo=%s, rowNo=%s, surveyResponse=%s", beanReader.getLineNumber(),
+ beanReader.getRowNumber(), surveyResponse));
+ }
+
+ }
+ finally {
+ if( beanReader != null ) {
+ beanReader.close();
+ }
+ }
+}
++-------------------------------------------------------------------------------------------------------------+
+
+ Output:
+
+---------------------------------------------------------------------------------------------------------------
+lineNo=2, rowNo=2, surveyResponse=SurveyResponse [age=18, consentGiven=true, answers=[Answer [questionNo=1, answer=Twelve], Answer [questionNo=2, answer=Albert Einstein], Answer [questionNo=3, answer=Big Bang Theory]]]
+lineNo=3, rowNo=3, surveyResponse=SurveyResponse [age=null, consentGiven=true, answers=[Answer [questionNo=1, answer=Thirteen], Answer [questionNo=2, answer=Nikola Tesla], Answer [questionNo=3, answer=Stargate]]]
+lineNo=4, rowNo=4, surveyResponse=SurveyResponse [age=42, consentGiven=false, answers=[Answer [questionNo=1, answer=null], Answer [questionNo=2, answer=Carl Sagan], Answer [questionNo=3, answer=Star Wars]]]
+---------------------------------------------------------------------------------------------------------------
+
+* Partial reading with CsvDozerBeanReader
+
+ Partial reading with CsvDozerBeanReader is virtually identical to CsvBeanReader.
+ See the partial reading example in the {{{./xref-test/org/supercsv/example/dozer/Reading.html}reading example source}}.
+
+* Writing with CsvDozerBeanWriter
+
+ {{{./apidocs/org/supercsv/io/dozer/CsvDozerBeanWriter.html}CsvDozerBeanWriter}} is the most powerful CSV writer.
+ The example writes each CSV row from a {{{./xref-test/org/supercsv/mock/dozer/SurveyResponse.html}SurveyResponse}} bean
+ , which has a Collection of {{{./xref-test/org/supercsv/mock/dozer/Answer.html}Answer}}s.
+
+ It uses exactly the same field mapping as the reading example above, and once again you'll notice that CsvDozerBeanWriter
+ requires you to configure it (with the <<configureBeanMapping()>> method) prior to writing.
+
++-------------------------------------------------------------------------------------------------------------+
+/**
+ * An example of writing using CsvDozerBeanWriter.
+ */
+private static void writeWithDozerCsvBeanWriter() throws Exception {
+
+ final CellProcessor[] processors = new CellProcessor[] {
+ new Optional(), // age
+ new FmtBool("Y", "N"), // consent
+ new NotNull(), // questionNo 1
+ new Optional(), // answer 1
+ new NotNull(), // questionNo 2
+ new Optional(), // answer 2
+ new NotNull(), // questionNo 3
+		new Optional() }; // answer 3
+
+ // create the survey responses to write
+ SurveyResponse response1 = new SurveyResponse(18, true, Arrays.asList(new Answer(1, "Twelve"), new Answer(2,
+ "Albert Einstein"), new Answer(3, "Big Bang Theory")));
+ SurveyResponse response2 = new SurveyResponse(null, true, Arrays.asList(new Answer(1, "Thirteen"), new Answer(2,
+ "Nikola Tesla"), new Answer(3, "Stargate")));
+ SurveyResponse response3 = new SurveyResponse(42, false, Arrays.asList(new Answer(1, null), new Answer(2,
+ "Carl Sagan"), new Answer(3, "Star Wars")));
+ final List surveyResponses = Arrays.asList(response1, response2, response3);
+
+ ICsvDozerBeanWriter beanWriter = null;
+ try {
+ beanWriter = new CsvDozerBeanWriter(new FileWriter("target/writeWithCsvDozerBeanWriter.csv"),
+ CsvPreference.STANDARD_PREFERENCE);
+
+ // configure the mapping from the fields to the CSV columns
+ beanWriter.configureBeanMapping(SurveyResponse.class, FIELD_MAPPING);
+
+ // write the header
+ beanWriter.writeHeader("age", "consentGiven", "questionNo1", "answer1", "questionNo2", "answer2",
+ "questionNo3", "answer3");
+
+ // write the beans
+ for( final SurveyResponse surveyResponse : surveyResponses ) {
+ beanWriter.write(surveyResponse, processors);
+ }
+
+ }
+ finally {
+ if( beanWriter != null ) {
+ beanWriter.close();
+ }
+ }
+}
++-------------------------------------------------------------------------------------------------------------+
+
+ Output:
+
+---------------------------------------------------------------------------------------------------------------
+lineNo=2, rowNo=2, surveyResponse=SurveyResponse [age=18, consentGiven=true, answers=[Answer [questionNo=1, answer=Twelve], Answer [questionNo=2, answer=Albert Einstein], Answer [questionNo=3, answer=Big Bang Theory]]]
+lineNo=3, rowNo=3, surveyResponse=SurveyResponse [age=null, consentGiven=true, answers=[Answer [questionNo=1, answer=Thirteen], Answer [questionNo=2, answer=Nikola Tesla], Answer [questionNo=3, answer=Stargate]]]
+lineNo=4, rowNo=4, surveyResponse=SurveyResponse [age=42, consentGiven=false, answers=[Answer [questionNo=1, answer=null], Answer [questionNo=2, answer=Carl Sagan], Answer [questionNo=3, answer=Star Wars]]]
+---------------------------------------------------------------------------------------------------------------
+
+* Partial writing with CsvDozerBeanWriter
+
+ Partial writing with CsvDozerBeanWriter is virtually identical to CsvBeanWriter.
+ See the partial writing example in the {{{./xref-test/org/supercsv/example/dozer/Writing.html}writing example source}}.
diff --git a/src/site/apt/examples_new_cell_processor.apt b/src/site/apt/examples_new_cell_processor.apt
index 766d4692..7e69a6bd 100644
--- a/src/site/apt/examples_new_cell_processor.apt
+++ b/src/site/apt/examples_new_cell_processor.apt
@@ -1,109 +1,109 @@
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-~~ Copyright 2007 Kasper B. Graversen
-~~
-~~ Licensed under the Apache License, Version 2.0 (the "License");
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~ http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an "AS IS" BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License.
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ------------------------------
- Writing custom cell processors
- ------------------------------
-
-Writing custom cell processors
-
- Super CSV provides a wide variety of useful cell processors, but you are free to write your own if need to.
- If you think other people might benefit from your custom cell processor, send us a patch and we'll consider adding
- it to the next version of Super CSV.
-
- So how do you write a custom cell processor?
-
- Let's say you're trying to read a CSV file that has a day column, and you've written your own enumeration
- to represent that.
-
-
-+------------------------------------------------------------------------------------------------------+
-package org.supercsv.example;
-
-/**
- * An enumeration of days.
- */
-public enum Day {
- MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY
-}
-+------------------------------------------------------------------------------------------------------+
-
- You could write the following processor to parse the column to your enum (ignoring the case of the input).
-
-+------------------------------------------------------------------------------------------------------+
-package org.supercsv.example;
-
-import org.supercsv.cellprocessor.CellProcessorAdaptor;
-import org.supercsv.cellprocessor.ift.CellProcessor;
-import org.supercsv.exception.SuperCsvCellProcessorException;
-import org.supercsv.util.CsvContext;
-
-/**
- * An example of a custom cell processor.
- */
-public class ParseDay extends CellProcessorAdaptor {
-
- public ParseDay() {
- super();
- }
-
- public ParseDay(CellProcessor next) {
- // this constructor allows other processors to be chained after ParseDay
- super(next);
- }
-
- public Object execute(Object value, CsvContext context) {
-
- validateInputNotNull(value, context); // throws an Exception if the input is null
-
- for (Day day : Day.values()){
- if (day.name().equalsIgnoreCase(value.toString())){
- // passes the Day enum to the next processor in the chain
- return next.execute(day, context);
- }
- }
-
- throw new SuperCsvCellProcessorException(
- String.format("Could not parse '%s' as a day", value), context, this);
- }
-}
-+------------------------------------------------------------------------------------------------------+
-
- The important things to note above are:
-
- * the processor must extend {{{./apidocs/org/supercsv/cellprocessor/CellProcessorAdaptor.html}CellProcessorAdaptor}} -
- this ensures it implements the <<>> interface and can be chained to other processors
-
- * it has a no-args constructor (for when this is the last or only processor in the chain)
-
- * it has constructor that allows another processor to be chained afterwards (it must call <<>>)
-
- * if the processor required further configuration, additional parameters could be added to the constructors
-
- * input is mandatory for this processor, so it calls its inherited <<>> method,
- which throws an Exception for null input
-
- * the return statement for the <<>> method actually invokes the <<>> method of the next processor in the chain.
- If there is no next processor, the value will simply be returned, otherwise the next processor is free to perform additional processing.
- If your processor doesn't allow chaining at all, then you could simply return the value instead (i.e. <<>> in the above example).
-
- * if the processor fails to parse the input (it doesn't match any of the days), it throws an Exception with
- a meaningful message, the current context (which will contain the line/row/column numbers), and a reference to the
- processor.
-
- []
-
- For more ideas, take a look at the existing cell processors in the {{{./xref/index.html}project source}}.
-
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~ Copyright 2007 Kasper B. Graversen
+~~
+~~ Licensed under the Apache License, Version 2.0 (the "License");
+~~ you may not use this file except in compliance with the License.
+~~ You may obtain a copy of the License at
+~~
+~~ http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ ------------------------------
+ Writing custom cell processors
+ ------------------------------
+
+Writing custom cell processors
+
+  Super CSV provides a wide variety of useful cell processors, but you are free to write your own if you need to.
+ If you think other people might benefit from your custom cell processor, send us a patch and we'll consider adding
+ it to the next version of Super CSV.
+
+ So how do you write a custom cell processor?
+
+ Let's say you're trying to read a CSV file that has a day column, and you've written your own enumeration
+ to represent that.
+
+
++------------------------------------------------------------------------------------------------------+
+package org.supercsv.example;
+
+/**
+ * An enumeration of days.
+ */
+public enum Day {
+ MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY
+}
++------------------------------------------------------------------------------------------------------+
+
+ You could write the following processor to parse the column to your enum (ignoring the case of the input).
+
++------------------------------------------------------------------------------------------------------+
+package org.supercsv.example;
+
+import org.supercsv.cellprocessor.CellProcessorAdaptor;
+import org.supercsv.cellprocessor.ift.CellProcessor;
+import org.supercsv.exception.SuperCsvCellProcessorException;
+import org.supercsv.util.CsvContext;
+
+/**
+ * An example of a custom cell processor.
+ */
+public class ParseDay extends CellProcessorAdaptor {
+
+ public ParseDay() {
+ super();
+ }
+
+ public ParseDay(CellProcessor next) {
+ // this constructor allows other processors to be chained after ParseDay
+ super(next);
+ }
+
+ public Object execute(Object value, CsvContext context) {
+
+ validateInputNotNull(value, context); // throws an Exception if the input is null
+
+ for (Day day : Day.values()){
+ if (day.name().equalsIgnoreCase(value.toString())){
+ // passes the Day enum to the next processor in the chain
+ return next.execute(day, context);
+ }
+ }
+
+ throw new SuperCsvCellProcessorException(
+ String.format("Could not parse '%s' as a day", value), context, this);
+ }
+}
++------------------------------------------------------------------------------------------------------+
+
+ The important things to note above are:
+
+ * the processor must extend {{{./apidocs/org/supercsv/cellprocessor/CellProcessorAdaptor.html}CellProcessorAdaptor}} -
+  this ensures it implements the <<CellProcessor>> interface and can be chained to other processors
+
+ * it has a no-args constructor (for when this is the last or only processor in the chain)
+
+  * it has a constructor that allows another processor to be chained afterwards (it must call <<super(next)>>)
+
+ * if the processor required further configuration, additional parameters could be added to the constructors
+
+  * input is mandatory for this processor, so it calls its inherited <<validateInputNotNull()>> method,
+ which throws an Exception for null input
+
+  * the return statement for the <<execute()>> method actually invokes the <<execute()>> method of the next processor in the chain.
+ If there is no next processor, the value will simply be returned, otherwise the next processor is free to perform additional processing.
+  If your processor doesn't allow chaining at all, then you could simply return the value instead (i.e. <<return day;>> in the above example).
+
+ * if the processor fails to parse the input (it doesn't match any of the days), it throws an Exception with
+ a meaningful message, the current context (which will contain the line/row/column numbers), and a reference to the
+ processor.
+
+ []
+
+ For more ideas, take a look at the existing cell processors in the {{{./xref/index.html}project source}}.
+
diff --git a/src/site/apt/examples_partial_reading.apt b/src/site/apt/examples_partial_reading.apt
index 6bf58958..6b987f3f 100644
--- a/src/site/apt/examples_partial_reading.apt
+++ b/src/site/apt/examples_partial_reading.apt
@@ -1,130 +1,130 @@
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-~~ Copyright 2007 Kasper B. Graversen
-~~
-~~ Licensed under the Apache License, Version 2.0 (the "License");
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~ http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an "AS IS" BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License.
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ---------------
- Partial reading
- ---------------
-
-Partial reading
-
- Partial reading allows you to ignore columns when reading CSV files by simply setting the appropriate header columns
- to <<>>.
-
- The examples on this page use the same example CSV file as the {{{./examples_reading.html}reading examples}}, and
- the full source can be found {{{./xref-test/org/supercsv/example/Reading.html}here}}.
-
-* Partial reading with CsvBeanReader
-
- As you can see from the output of this example, the fields associated with the ignored columns kept their default values -
- only the <<>>, <<>>, and <<>> are populated.
-
- Also note that the cell processors associated with the ignored columns were also set to <<>> to avoid any unnecessary
- processing (cell processors are always executed).
-
-+-------------------------------------------------------------------------------------------------------------+
-/**
- * An example of partial reading using CsvBeanReader.
- */
-private static void partialReadWithCsvBeanReader() throws Exception {
-
- ICsvBeanReader beanReader = null;
- try {
- beanReader = new CsvBeanReader(new FileReader(CSV_FILENAME), CsvPreference.STANDARD_PREFERENCE);
-
- beanReader.getHeader(true); // skip past the header (we're defining our own)
-
- // only map the first 3 columns - setting header elements to null means those columns are ignored
- final String[] header = new String[] { "customerNo", "firstName", "lastName", null, null, null, null, null,
- null, null };
-
- // no processing required for ignored columns
- final CellProcessor[] processors = new CellProcessor[] { new UniqueHashCode(), new NotNull(),
- new NotNull(), null, null, null, null, null, null, null };
-
- CustomerBean customer;
- while( (customer = beanReader.read(CustomerBean.class, header, processors)) != null ) {
- System.out.println(String.format("lineNo=%s, rowNo=%s, customer=%s", beanReader.getLineNumber(),
- beanReader.getRowNumber(), customer));
- }
-
- }
- finally {
- if( beanReader != null ) {
- beanReader.close();
- }
- }
-}
-+-------------------------------------------------------------------------------------------------------------+
-
- Output:
-
----------------------------------------------------------------------------------------------------------------
-lineNo=4, rowNo=2, customer=CustomerBean [customerNo=1, firstName=John, lastName=Dunbar, birthDate=null, mailingAddress=null, married=null, numberOfKids=null, favouriteQuote=null, email=null, loyaltyPoints=0]
-lineNo=7, rowNo=3, customer=CustomerBean [customerNo=2, firstName=Bob, lastName=Down, birthDate=null, mailingAddress=null, married=null, numberOfKids=null, favouriteQuote=null, email=null, loyaltyPoints=0]
-lineNo=10, rowNo=4, customer=CustomerBean [customerNo=3, firstName=Alice, lastName=Wunderland, birthDate=null, mailingAddress=null, married=null, numberOfKids=null, favouriteQuote=null, email=null, loyaltyPoints=0]
-lineNo=13, rowNo=5, customer=CustomerBean [customerNo=4, firstName=Bill, lastName=Jobs, birthDate=null, mailingAddress=null, married=null, numberOfKids=null, favouriteQuote=null, email=null, loyaltyPoints=0]
----------------------------------------------------------------------------------------------------------------
-
-* Partial reading with CsvMapReader
-
- As you can see from the output of this example, the output Map only has entries for <<>>, <<>>, and <<>> - the other fields were ignored.
-
- Unlike the CsvBeanReader example above, this example defines processors for all columns. This means that constraint validation is still applied to the ignored columns,
- but they don't appear in the output Map.
-
-+-------------------------------------------------------------------------------------------------------------+
-/**
- * An example of partial reading using CsvMapReader.
- */
-private static void partialReadWithCsvMapReader() throws Exception {
-
- ICsvMapReader mapReader = null;
- try {
- mapReader = new CsvMapReader(new FileReader(CSV_FILENAME), CsvPreference.STANDARD_PREFERENCE);
-
- mapReader.getHeader(true); // skip past the header (we're defining our own)
-
- // only map the first 3 columns - setting header elements to null means those columns are ignored
- final String[] header = new String[] { "customerNo", "firstName", "lastName", null, null, null, null, null,
- null, null };
-
- // apply some constraints to ignored columns (just because we can)
- final CellProcessor[] processors = new CellProcessor[] { new UniqueHashCode(), new NotNull(),
- new NotNull(), new NotNull(), new NotNull(), new Optional(), new Optional(), new NotNull(),
- new NotNull(), new LMinMax(0L, LMinMax.MAX_LONG) };
-
- Map customerMap;
- while( (customerMap = mapReader.read(header, processors)) != null ) {
- System.out.println(String.format("lineNo=%s, rowNo=%s, customerMap=%s", mapReader.getLineNumber(),
- mapReader.getRowNumber(), customerMap));
- }
-
- }
- finally {
- if( mapReader != null ) {
- mapReader.close();
- }
- }
-}
-+-------------------------------------------------------------------------------------------------------------+
-
- Output:
-
----------------------------------------------------------------------------------------------------------------
-lineNo=4, rowNo=2, customerMap={lastName=Dunbar, customerNo=1, firstName=John}
-lineNo=7, rowNo=3, customerMap={lastName=Down, customerNo=2, firstName=Bob}
-lineNo=10, rowNo=4, customerMap={lastName=Wunderland, customerNo=3, firstName=Alice}
-lineNo=13, rowNo=5, customerMap={lastName=Jobs, customerNo=4, firstName=Bill}
----------------------------------------------------------------------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~ Copyright 2007 Kasper B. Graversen
+~~
+~~ Licensed under the Apache License, Version 2.0 (the "License");
+~~ you may not use this file except in compliance with the License.
+~~ You may obtain a copy of the License at
+~~
+~~ http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ ---------------
+ Partial reading
+ ---------------
+
+Partial reading
+
+ Partial reading allows you to ignore columns when reading CSV files by simply setting the appropriate header columns
+ to <<null>>.
+
+ The examples on this page use the same example CSV file as the {{{./examples_reading.html}reading examples}}, and
+ the full source can be found {{{./xref-test/org/supercsv/example/Reading.html}here}}.
+
+* Partial reading with CsvBeanReader
+
+ As you can see from the output of this example, the fields associated with the ignored columns kept their default values -
+ only the <<customerNo>>, <<firstName>>, and <<lastName>> are populated.
+
+ Also note that the cell processors associated with the ignored columns were also set to <<null>> to avoid any unnecessary
+ processing (cell processors are always executed).
+
++-------------------------------------------------------------------------------------------------------------+
+/**
+ * An example of partial reading using CsvBeanReader.
+ */
+private static void partialReadWithCsvBeanReader() throws Exception {
+
+ ICsvBeanReader beanReader = null;
+ try {
+ beanReader = new CsvBeanReader(new FileReader(CSV_FILENAME), CsvPreference.STANDARD_PREFERENCE);
+
+ beanReader.getHeader(true); // skip past the header (we're defining our own)
+
+ // only map the first 3 columns - setting header elements to null means those columns are ignored
+ final String[] header = new String[] { "customerNo", "firstName", "lastName", null, null, null, null, null,
+ null, null };
+
+ // no processing required for ignored columns
+ final CellProcessor[] processors = new CellProcessor[] { new UniqueHashCode(), new NotNull(),
+ new NotNull(), null, null, null, null, null, null, null };
+
+ CustomerBean customer;
+ while( (customer = beanReader.read(CustomerBean.class, header, processors)) != null ) {
+ System.out.println(String.format("lineNo=%s, rowNo=%s, customer=%s", beanReader.getLineNumber(),
+ beanReader.getRowNumber(), customer));
+ }
+
+ }
+ finally {
+ if( beanReader != null ) {
+ beanReader.close();
+ }
+ }
+}
++-------------------------------------------------------------------------------------------------------------+
+
+ Output:
+
+---------------------------------------------------------------------------------------------------------------
+lineNo=4, rowNo=2, customer=CustomerBean [customerNo=1, firstName=John, lastName=Dunbar, birthDate=null, mailingAddress=null, married=null, numberOfKids=null, favouriteQuote=null, email=null, loyaltyPoints=0]
+lineNo=7, rowNo=3, customer=CustomerBean [customerNo=2, firstName=Bob, lastName=Down, birthDate=null, mailingAddress=null, married=null, numberOfKids=null, favouriteQuote=null, email=null, loyaltyPoints=0]
+lineNo=10, rowNo=4, customer=CustomerBean [customerNo=3, firstName=Alice, lastName=Wunderland, birthDate=null, mailingAddress=null, married=null, numberOfKids=null, favouriteQuote=null, email=null, loyaltyPoints=0]
+lineNo=13, rowNo=5, customer=CustomerBean [customerNo=4, firstName=Bill, lastName=Jobs, birthDate=null, mailingAddress=null, married=null, numberOfKids=null, favouriteQuote=null, email=null, loyaltyPoints=0]
+---------------------------------------------------------------------------------------------------------------
+
+* Partial reading with CsvMapReader
+
+ As you can see from the output of this example, the output Map only has entries for <<customerNo>>, <<firstName>>, and <<lastName>> - the other fields were ignored.
+
+ Unlike the CsvBeanReader example above, this example defines processors for all columns. This means that constraint validation is still applied to the ignored columns,
+ but they don't appear in the output Map.
+
++-------------------------------------------------------------------------------------------------------------+
+/**
+ * An example of partial reading using CsvMapReader.
+ */
+private static void partialReadWithCsvMapReader() throws Exception {
+
+ ICsvMapReader mapReader = null;
+ try {
+ mapReader = new CsvMapReader(new FileReader(CSV_FILENAME), CsvPreference.STANDARD_PREFERENCE);
+
+ mapReader.getHeader(true); // skip past the header (we're defining our own)
+
+ // only map the first 3 columns - setting header elements to null means those columns are ignored
+ final String[] header = new String[] { "customerNo", "firstName", "lastName", null, null, null, null, null,
+ null, null };
+
+ // apply some constraints to ignored columns (just because we can)
+ final CellProcessor[] processors = new CellProcessor[] { new UniqueHashCode(), new NotNull(),
+ new NotNull(), new NotNull(), new NotNull(), new Optional(), new Optional(), new NotNull(),
+ new NotNull(), new LMinMax(0L, LMinMax.MAX_LONG) };
+
+ Map customerMap;
+ while( (customerMap = mapReader.read(header, processors)) != null ) {
+ System.out.println(String.format("lineNo=%s, rowNo=%s, customerMap=%s", mapReader.getLineNumber(),
+ mapReader.getRowNumber(), customerMap));
+ }
+
+ }
+ finally {
+ if( mapReader != null ) {
+ mapReader.close();
+ }
+ }
+}
++-------------------------------------------------------------------------------------------------------------+
+
+ Output:
+
+---------------------------------------------------------------------------------------------------------------
+lineNo=4, rowNo=2, customerMap={lastName=Dunbar, customerNo=1, firstName=John}
+lineNo=7, rowNo=3, customerMap={lastName=Down, customerNo=2, firstName=Bob}
+lineNo=10, rowNo=4, customerMap={lastName=Wunderland, customerNo=3, firstName=Alice}
+lineNo=13, rowNo=5, customerMap={lastName=Jobs, customerNo=4, firstName=Bill}
+---------------------------------------------------------------------------------------------------------------
diff --git a/src/site/apt/examples_partial_writing.apt b/src/site/apt/examples_partial_writing.apt
index 61b9df9d..8c8b7922 100644
--- a/src/site/apt/examples_partial_writing.apt
+++ b/src/site/apt/examples_partial_writing.apt
@@ -1,201 +1,201 @@
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-~~ Copyright 2007 Kasper B. Graversen
-~~
-~~ Licensed under the Apache License, Version 2.0 (the "License");
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~ http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an "AS IS" BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License.
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ---------------
- Partial writing
- ---------------
-
-Partial writing
-
- Partial writing allows you to handle optional values in your data.
-
- The full source for these examples can be found {{{./xref-test/org/supercsv/example/Writing.html}here}}.
-
-* Partial writing with CsvBeanWriter
-
- As you can see in this example, we're only writing 5 of the available fields from the bean and 2 of those are optional.
-
- This example demonstrates the two options you have when writing optional fields:
-
- [[1]] specifying a default value if the value is <<>> by using <<>> - in this case <<<"no response">>> is written when <<>> is <<>>.
-
- [[2]] writing an empty column if the value is <<>> - as is done by specifying <<>> for <<>>
- (<<>> would have the same effect as <<>>, but it's not as meaningful)
-
- []
-
-+-------------------------------------------------------------------------------------------------------------+
-/**
- * An example of partial reading using CsvBeanWriter.
- */
-private static void partialWriteWithCsvBeanWriter() throws Exception {
-
- // create the customer beans
- final CustomerBean john = new CustomerBean("1", "John", "Dunbar",
- new GregorianCalendar(1945, Calendar.JUNE, 13).getTime(),
- "1600 Amphitheatre Parkway\nMountain View, CA 94043\nUnited States", null, null,
- "\"May the Force be with you.\" - Star Wars", "jdunbar@gmail.com", 0L);
- final CustomerBean bob = new CustomerBean("2", "Bob", "Down",
- new GregorianCalendar(1919, Calendar.FEBRUARY, 25).getTime(),
- "1601 Willow Rd.\nMenlo Park, CA 94025\nUnited States", true, 0,
- "\"Frankly, my dear, I don't give a damn.\" - Gone With The Wind", "bobdown@hotmail.com", 123456L);
- final List customers = Arrays.asList(john, bob);
-
- ICsvBeanWriter beanWriter = null;
- try {
- beanWriter = new CsvBeanWriter(new FileWriter("target/partialWriteWithCsvBeanWriter.csv"),
- CsvPreference.STANDARD_PREFERENCE);
-
- // only map 5 of the 10 fields
- final String[] header = new String[] { "customerNo", "firstName", "lastName", "married", "numberOfKids" };
-
- // assign a default value for married (if null), and write numberOfKids as an empty column if null
- final CellProcessor[] processors = new CellProcessor[] { new UniqueHashCode(), new NotNull(),
- new NotNull(), new ConvertNullTo("no response", new FmtBool("yes", "no")), new Optional() };
-
- // write the header
- beanWriter.writeHeader(header);
-
- // write the customer beans
- for( final CustomerBean customer : customers ) {
- beanWriter.write(customer, header, processors);
- }
-
- }
- finally {
- if( beanWriter != null ) {
- beanWriter.close();
- }
- }
-}
-+-------------------------------------------------------------------------------------------------------------+
-
- Output:
-
----------------------------------------------------------------------------------------------------------------
-customerNo,firstName,lastName,married,numberOfKids
-1,John,Dunbar,no response,
-2,Bob,Down,yes,0
----------------------------------------------------------------------------------------------------------------
-
-* Partial writing with CsvListWriter
-
- This example is identical to the one above, but uses CsvListWriter.
-
-+-------------------------------------------------------------------------------------------------------------+
-/**
- * An example of partial reading using CsvListWriter.
- */
-private static void partialWriteWithCsvListWriter() throws Exception {
-
- final String[] header = new String[] { "customerNo", "firstName", "lastName", "married", "numberOfKids" };
-
- // create the customer Lists (CsvListWriter also accepts arrays!)
- final List