diff --git a/cobol-parser/src/main/scala/za/co/absa/cobrix/cobol/processor/impl/CobolProcessorInPlace.scala b/cobol-parser/src/main/scala/za/co/absa/cobrix/cobol/processor/impl/CobolProcessorInPlace.scala
index 252a04ce..59867282 100644
--- a/cobol-parser/src/main/scala/za/co/absa/cobrix/cobol/processor/impl/CobolProcessorInPlace.scala
+++ b/cobol-parser/src/main/scala/za/co/absa/cobrix/cobol/processor/impl/CobolProcessorInPlace.scala
@@ -46,16 +46,15 @@ class CobolProcessorInPlace(readerParameters: ReaderParameters,
                            (rawRecordProcessor: RawRecordProcessor): Long = {
     val recordExtractor = CobolProcessorBase.getRecordExtractor(readerParameters, copybookContents, inputStream, None)
 
-    val dataStream = inputStream.copyStream()
     try {
       StreamProcessor.processStreamInPlace(copybook,
         options,
-        dataStream,
+        inputStream,
         recordExtractor,
         rawRecordProcessor,
         outputStream)
     } finally {
-      dataStream.close()
+      inputStream.close()
     }
   }
 
diff --git a/spark-cobol/src/main/scala/za/co/absa/cobrix/spark/cobol/SparkCobolProcessor.scala b/spark-cobol/src/main/scala/za/co/absa/cobrix/spark/cobol/SparkCobolProcessor.scala
index b10d4de9..8af67cfd 100644
--- a/spark-cobol/src/main/scala/za/co/absa/cobrix/spark/cobol/SparkCobolProcessor.scala
+++ b/spark-cobol/src/main/scala/za/co/absa/cobrix/spark/cobol/SparkCobolProcessor.scala
@@ -198,6 +198,8 @@ object SparkCobolProcessor {
   private def getCobolParameters(listOfFiles: Seq[String], copybookContents: String, options: Map[String, String], ignoreRedundantOptions: Boolean): CobolParameters = {
     val varLenOptions = options + (PARAM_GENERATE_RECORD_ID -> "true")
 
+    // This method may be called several times during EBCDIC file processing. If there are redundant options, they will be logged.
+    // `ignoreRedundantOptions=true` is passed when the method is called just for copybook parsing, so logging of redundant options can be skipped.
     CobolParametersParser.parse(new Parameters(varLenOptions), !ignoreRedundantOptions)
       .copy(sourcePaths = listOfFiles, copybookContent = Option(copybookContents))
   }