From 916169ae5e5ce0c7298a6cae2e6f1ccfd8a07cfe Mon Sep 17 00:00:00 2001 From: Greg Hogan Date: Wed, 3 Jan 2018 14:19:58 -0500 Subject: [PATCH] [hotfix] Fix typos Fix typos from the IntelliJ "Typos" inspection. --- .../org/apache/flink/client/CliFrontend.java | 2 +- .../elasticsearch/ElasticsearchSinkBase.java | 2 +- .../testutils/SourceSinkDataTestKit.java | 2 +- .../kafka/FlinkKafkaProducer011.java | 2 +- .../kafka/FlinkKafkaProducer011ITCase.java | 2 +- .../kafka/FlinkKafkaProducer08.java | 8 +- .../kafka/KafkaAvroTableSource.java | 2 +- .../connectors/kafka/KafkaTableSink.java | 2 +- .../connectors/kafka/KafkaTableSource.java | 4 +- .../kafka/config/OffsetCommitModes.java | 2 +- .../internals/ClosableBlockingQueue.java | 2 +- .../kafka/FlinkKafkaProducerBaseTest.java | 2 +- .../kafka/KafkaConsumerTestBase.java | 2 +- .../kinesis/proxy/KinesisProxy.java | 2 +- .../kinesis/FlinkKinesisProducerTest.java | 2 +- .../rabbitmq/common/RMQConnectionConfig.java | 2 +- .../hadoop/mapred/HadoopInputFormatBase.java | 2 +- .../mapred/HadoopIOFormatsITCase.java | 2 +- .../flink/addons/hbase/TableInputFormat.java | 2 +- .../addons/hbase/HBaseConnectorITCase.java | 2 +- .../create-docker-swarm-service.sh | 2 +- .../wikiedits/WikipediaEditsSourceTest.java | 2 +- .../streaming/state/RocksDBStateBackend.java | 4 +- .../exclamation/ExclamationWithBolt.java | 2 +- .../ExclamationWithSpoutITCase.java | 2 +- .../tests/StormFieldsGroupingITCase.java | 2 +- .../apache/flink/storm/api/FlinkClient.java | 2 +- .../flink/storm/wrappers/BoltWrapper.java | 26 +++--- .../wrappers/MergedInputsBoltWrapper.java | 12 +-- .../flink/storm/wrappers/SpoutWrapper.java | 20 ++--- .../storm/wrappers/WrapperSetupHelper.java | 4 +- .../flink/api/common/ExecutionConfig.java | 2 +- .../api/common/io/DelimitedInputFormat.java | 2 +- .../flink/api/common/io/FileInputFormat.java | 2 +- .../api/common/state/ListStateDescriptor.java | 2 +- .../api/common/state/MapStateDescriptor.java | 2 +- .../TypeSerializerSerializationUtil.java | 2 +- .../common/typeutils/base/CharComparator.java | 2 +- .../flink/api/java/functions/KeySelector.java | 2 +- .../api/java/typeutils/TypeExtractor.java | 4 +- ...oRegistrationSerializerConfigSnapshot.java | 11 +-- .../typeutils/runtime/PojoComparator.java | 2 +- .../runtime/kryo/KryoSerializer.java | 2 +- .../flink/configuration/ConfigConstants.java | 6 +- .../core/fs/local/LocalDataInputStream.java | 2 +- .../org/apache/flink/types/CharValue.java | 2 +- .../java/org/apache/flink/types/IntValue.java | 2 +- .../org/apache/flink/util/StringBasedID.java | 2 +- .../flink/configuration/MemorySizeTest.java | 4 +- .../org/apache/hadoop/conf/Configuration.java | 84 +++++++++---------- .../flink/api/java/io/CsvInputFormatTest.java | 6 +- .../api/java/operator/MaxByOperatorTest.java | 16 ++-- .../api/java/operator/MinByOperatorTest.java | 16 ++-- .../examples/java8/wordcount/WordCount.java | 2 +- .../examples/java8/wordcount/WordCount.java | 2 +- .../java/operators/lambdas/FilterITCase.java | 2 +- .../org/apache/flink/cep/nfa/NFATest.java | 2 +- .../random/RandomGenerableFactory.java | 2 +- .../apache/flink/ml/classification/SVM.scala | 2 +- .../outlier/StochasticOutlierSelection.scala | 4 +- .../apache/flink/ml/pipeline/Estimator.scala | 2 +- .../apache/flink/ml/pipeline/Predictor.scala | 2 +- .../flink/ml/pipeline/Transformer.scala | 10 +-- .../flink/ml/preprocessing/Splitter.scala | 2 +- .../apache/flink/ml/recommendation/ALS.scala | 2 +- .../resources/tableSourceConverter.properties | 4 +- 
.../flink/table/api/TableEnvironment.scala | 2 +- .../apache/flink/table/api/TableSchema.scala | 4 +- .../flink/table/calcite/FlinkTypeSystem.scala | 6 +- .../flink/table/codegen/CodeGenerator.scala | 2 +- .../table/functions/ScalarFunction.scala | 2 +- .../flink/table/functions/TableFunction.scala | 2 +- .../functions/utils/AggSqlFunction.scala | 2 +- .../utils/UserDefinedFunctionUtils.scala | 2 +- .../table/plan/nodes/CommonCorrelate.scala | 2 +- .../plan/nodes/dataset/DataSetJoin.scala | 6 +- .../nodes/datastream/retractionTraits.scala | 5 +- .../flink/table/plan/schema/InlineTable.scala | 4 +- ...crementalAggregateTimeWindowFunction.scala | 2 +- .../IncrementalAggregateWindowFunction.scala | 2 +- .../RowTimeSortProcessFunction.scala | 2 +- .../aggregate/RowTimeUnboundedOver.scala | 4 +- .../table/runtime/join/WindowJoinUtil.scala | 2 +- .../flink/table/sources/CsvTableSource.scala | 2 +- .../flink/table/sources/TableSourceUtil.scala | 2 +- .../flink/table/expressions/RowTypeTest.scala | 2 +- .../expressions/ScalarOperatorsTest.scala | 2 +- .../entrypoint/MesosEntrypointUtils.java | 2 +- .../services/AbstractMesosServices.java | 2 +- .../apache/flink/metrics/jmx/JMXReporter.java | 2 +- .../optimizer/traversals/PlanFinalizer.java | 2 +- .../UnionPropertyPropagationTest.java | 2 +- .../webmonitor/RedirectHandlerTest.java | 2 +- ...toryServerStaticFileServerHandlerTest.java | 2 +- .../web-dashboard/vendor-local/d3-timeline.js | 2 +- .../flink/runtime/akka/FlinkUntypedActor.java | 8 +- .../runtime/blob/PermanentBlobCache.java | 2 +- .../checkpoint/CheckpointCoordinator.java | 2 +- .../checkpoint/PendingCheckpointStats.java | 2 +- .../runtime/client/JobListeningContext.java | 2 +- .../clusterframework/BootstrapTools.java | 2 +- .../FlinkResourceManager.java | 4 +- .../overlays/HadoopConfOverlay.java | 2 +- .../overlays/KeytabOverlay.java | 2 +- .../types/ResourceProfile.java | 2 +- .../executiongraph/AccessExecutionGraph.java | 4 +- .../runtime/executiongraph/Execution.java | 6 +- .../executiongraph/ExecutionVertex.java | 2 +- .../FsNegativeRunningJobsRegistry.java | 2 +- .../partition/SpillableSubpartition.java | 2 +- .../iterative/task/AbstractIterativeTask.java | 2 +- .../flink/runtime/jobgraph/JobVertex.java | 2 +- .../flink/runtime/jobmaster/SlotContext.java | 2 +- .../jobmaster/slotpool/SlotPoolGateway.java | 2 +- .../flink/runtime/operators/BatchTask.java | 8 +- .../runtime/operators/CoGroupDriver.java | 4 +- .../hash/InPlaceMutableHashTable.java | 2 +- .../messages/ClusterConfigurationInfo.java | 2 +- .../state/AbstractKeyedStateBackend.java | 2 +- .../state/DefaultOperatorStateBackend.java | 2 +- .../runtime/state/SharedStateRegistry.java | 2 +- .../taskexecutor/slot/TaskSlotTable.java | 2 +- .../flink/runtime/taskmanager/Task.java | 2 +- .../apache/flink/runtime/akka/AkkaUtils.scala | 2 +- .../runtime/messages/ArchiveMessages.scala | 2 +- .../checkpoint/CheckpointCoordinatorTest.java | 2 +- .../checkpoint/PendingCheckpointTest.java | 2 +- ...oKeeperCompletedCheckpointStoreITCase.java | 2 +- .../client/JobClientActorRecoveryITCase.java | 2 +- .../executiongraph/GlobalModVersionTest.java | 2 +- .../heartbeat/HeartbeatManagerTest.java | 2 +- .../runtime/io/disk/ChannelViewsTest.java | 2 +- .../io/network/NetworkEnvironmentTest.java | 4 +- .../io/network/TaskEventDispatcherTest.java | 2 +- .../runtime/jobmanager/JobManagerTest.java | 2 +- .../metrics/groups/TaskMetricGroupTest.java | 2 +- .../query/KvStateLocationRegistryTest.java | 2 +- .../runtime/query/KvStateLocationTest.java | 2 
+- .../slotmanager/SlotManagerTest.java | 2 +- .../checkpoints/CheckpointStatsCacheTest.java | 2 +- .../state/SharedStateRegistryTest.java | 2 +- .../runtime/state/StateBackendTestBase.java | 2 +- .../taskexecutor/TaskManagerServicesTest.java | 2 +- .../TestingTaskExecutorGateway.java | 2 +- .../TestingJobManagerMessages.scala | 2 +- .../flink/api/scala/ScalaShellITCase.scala | 2 +- .../api/scala/codegen/TypeAnalyzer.scala | 2 +- .../flink/api/scala/MaxByOperatorTest.scala | 16 ++-- .../flink/api/scala/MinByOperatorTest.scala | 16 ++-- .../streaming/api/CheckpointingMode.java | 2 +- .../api/datastream/CoGroupedStreams.java | 2 +- .../StreamExecutionEnvironment.java | 2 +- .../streaming/api/graph/StreamConfig.java | 2 +- .../api/graph/StreamGraphGenerator.java | 4 +- .../operators/HeapInternalTimerService.java | 2 +- .../operators/async/queue/AsyncResult.java | 2 +- .../queue/UnorderedStreamElementQueue.java | 2 +- .../CoFeedbackTransformation.java | 2 +- .../OneInputTransformation.java | 2 +- .../operators/windowing/MergingWindowSet.java | 2 +- .../operators/windowing/WindowOperator.java | 2 +- .../util/typeutils/FieldAccessor.java | 2 +- .../flink/streaming/api/DataStreamTest.java | 2 +- .../windowing/MergingWindowSetTest.java | 4 +- .../windowing/WindowOperatorContractTest.java | 4 +- .../WindowOperatorMigrationTest.java | 4 +- .../windowing/WindowOperatorTest.java | 2 +- .../tasks/StreamTaskTerminationTest.java | 2 +- .../runtime/tasks/StreamTaskTest.java | 2 +- .../api/scala/CoGroupedStreams.scala | 2 +- .../jar/CheckpointedStreamingProgram.java | 2 +- .../jar/CustomKvStateProgram.java | 2 +- .../flink/test/operators/JoinITCase.java | 2 +- .../ZooKeeperLeaderElectionITCase.java | 2 +- .../streaming/runtime/TimestampITCase.java | 4 +- .../apache/flink/test/util/CoordVector.java | 8 +- .../ParallelSessionsEventGenerator.java | 2 +- .../BatchScalaAPICompletenessTest.scala | 2 +- .../api/scala/operators/GroupingTest.scala | 2 +- .../flink/yarn/TestingYarnTaskManager.scala | 2 +- .../flink/yarn/YarnClusterDescriptorTest.java | 6 +- tools/create_release_files.sh | 2 +- tools/list_deps.py | 2 +- tools/merge_flink_pr.py | 2 +- 184 files changed, 332 insertions(+), 332 deletions(-) diff --git a/flink-clients/src/main/java/org/apache/flink/client/CliFrontend.java b/flink-clients/src/main/java/org/apache/flink/client/CliFrontend.java index c535783119d5f..dff12f6b70d52 100644 --- a/flink-clients/src/main/java/org/apache/flink/client/CliFrontend.java +++ b/flink-clients/src/main/java/org/apache/flink/client/CliFrontend.java @@ -1164,7 +1164,7 @@ public CustomCommandLine getActiveCustomCommandLine(CommandLine commandLine) { /** * Retrieves the loaded custom command-lines. - * @return An unmodifiyable list of loaded custom command-lines. + * @return An unmodifiable list of loaded custom command-lines. 
*/ public static List> getCustomCommandLineList() { return Collections.unmodifiableList(customCommandLines); diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java index c49d726454a27..fe4343ff58379 100644 --- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java +++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java @@ -142,7 +142,7 @@ public void setDelayMillis(long delayMillis) { /** The user specified config map that we forward to Elasticsearch when we create the {@link Client}. */ private final Map userConfig; - /** The function that is used to construct mulitple {@link ActionRequest ActionRequests} from each incoming element. */ + /** The function that is used to construct multiple {@link ActionRequest ActionRequests} from each incoming element. */ private final ElasticsearchSinkFunction elasticsearchSinkFunction; /** User-provided handler for failed {@link ActionRequest ActionRequests}. */ diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/SourceSinkDataTestKit.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/SourceSinkDataTestKit.java index 4e3d3e2c979b5..32498c6c109ed 100644 --- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/SourceSinkDataTestKit.java +++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/SourceSinkDataTestKit.java @@ -67,7 +67,7 @@ public void cancel() { } /** - * A {@link ElasticsearchSinkFunction} that indexes each element it receives to a sepecified Elasticsearch index. + * A {@link ElasticsearchSinkFunction} that indexes each element it receives to a specified Elasticsearch index. */ public static class TestElasticsearchSinkFunction implements ElasticsearchSinkFunction> { private static final long serialVersionUID = 1L; diff --git a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer011.java b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer011.java index b14e4871287e5..ccf11e75f0126 100644 --- a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer011.java +++ b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer011.java @@ -170,7 +170,7 @@ public enum Semantic { public static final String KEY_DISABLE_METRICS = "flink.disable-metrics"; /** - * Descriptor of the transacionalIds list. + * Descriptor of the transactional IDs list. 
*/ private static final ListStateDescriptor NEXT_TRANSACTIONAL_ID_HINT_DESCRIPTOR = new ListStateDescriptor<>("next-transactional-id-hint", TypeInformation.of(NextTransactionalIdHint.class)); diff --git a/flink-connectors/flink-connector-kafka-0.11/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer011ITCase.java b/flink-connectors/flink-connector-kafka-0.11/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer011ITCase.java index 85735c80cc09e..3c3c86adeffea 100644 --- a/flink-connectors/flink-connector-kafka-0.11/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer011ITCase.java +++ b/flink-connectors/flink-connector-kafka-0.11/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer011ITCase.java @@ -101,7 +101,7 @@ public void testRestoreToCheckpointAfterExceedingProducersPool() throws Exceptio assertIsCausedBy(FlinkKafka011ErrorCode.PRODUCERS_POOL_EMPTY, ex); } - // Resume transactions before testHrness1 is being closed (in case of failures close() might not be called) + // Resume transactions before testHarness1 is being closed (in case of failures close() might not be called) try (OneInputStreamOperatorTestHarness testHarness2 = createTestHarness(topic)) { testHarness2.setup(); // restore from snapshot1, transactions with records 43 and 44 should be aborted diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer08.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer08.java index d2f17d22c68aa..20900f061c881 100644 --- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer08.java +++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer08.java @@ -75,7 +75,7 @@ public FlinkKafkaProducer08(String topicId, SerializationSchema serializatio * @param topicId The topic to write data to * @param serializationSchema A (keyless) serializable serialization schema for turning user objects into a kafka-consumable byte[] * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers.' is the only required argument. - * @param customPartitioner A serializable partitioner for assining messages to Kafka partitions. + * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions. */ public FlinkKafkaProducer08(String topicId, SerializationSchema serializationSchema, Properties producerConfig, FlinkKafkaPartitioner customPartitioner) { this(topicId, new KeyedSerializationSchemaWrapper<>(serializationSchema), producerConfig, customPartitioner); @@ -120,7 +120,7 @@ public FlinkKafkaProducer08(String topicId, KeyedSerializationSchema seriali * @param topicId The topic to write data to * @param serializationSchema A serializable serialization schema for turning user objects into a kafka-consumable byte[] supporting key/value messages * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers.' is the only required argument. - * @param customPartitioner A serializable partitioner for assining messages to Kafka partitions. + * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions. 
*/ public FlinkKafkaProducer08(String topicId, KeyedSerializationSchema serializationSchema, Properties producerConfig, FlinkKafkaPartitioner customPartitioner) { super(topicId, serializationSchema, producerConfig, customPartitioner); @@ -134,7 +134,7 @@ public FlinkKafkaProducer08(String topicId, KeyedSerializationSchema seriali * @param topicId The topic to write data to * @param serializationSchema A (keyless) serializable serialization schema for turning user objects into a kafka-consumable byte[] * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers.' is the only required argument. - * @param customPartitioner A serializable partitioner for assining messages to Kafka partitions. + * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions. * * @deprecated This is a deprecated constructor that does not correctly handle partitioning when * producing to multiple topics. Use @@ -151,7 +151,7 @@ public FlinkKafkaProducer08(String topicId, SerializationSchema serializatio * @param topicId The topic to write data to * @param serializationSchema A serializable serialization schema for turning user objects into a kafka-consumable byte[] supporting key/value messages * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers.' is the only required argument. - * @param customPartitioner A serializable partitioner for assining messages to Kafka partitions. + * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions. * * @deprecated This is a deprecated constructor that does not correctly handle partitioning when * producing to multiple topics. Use diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java index 055b67924822c..bf2e9db7dc236 100644 --- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java +++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java @@ -156,7 +156,7 @@ protected abstract static class Builder fieldMapping; /** - * Sets the class of the Avro records that aree read from the Kafka topic. + * Sets the class of the Avro records that are read from the Kafka topic. * * @param avroClass The class of the Avro records that are read from the Kafka topic. * @return The builder. diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSink.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSink.java index f42827e5f35ce..e1b2d47f903a1 100644 --- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSink.java +++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSink.java @@ -61,7 +61,7 @@ public KafkaTableSink( } /** - * Returns the version-specifid Kafka producer. + * Returns the version-specific Kafka producer. * * @param topic Kafka topic to produce to. * @param properties Properties for the Kafka producer. 
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java index d0ee7de7d847d..36cb8fd56d3aa 100644 --- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java +++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java @@ -307,7 +307,7 @@ public B withSchema(TableSchema schema) { /** * Configures a field of the table to be a processing time attribute. - * The configured field must be present in the tabel schema and of type {@link Types#SQL_TIMESTAMP()}. + * The configured field must be present in the table schema and of type {@link Types#SQL_TIMESTAMP()}. * * @param proctimeAttribute The name of the processing time attribute in the table schema. * @return The builder. @@ -322,7 +322,7 @@ public B withProctimeAttribute(String proctimeAttribute) { /** * Configures a field of the table to be a rowtime attribute. - * The configured field must be present in the tabel schema and of type {@link Types#SQL_TIMESTAMP()}. + * The configured field must be present in the table schema and of type {@link Types#SQL_TIMESTAMP()}. * * @param rowtimeAttribute The name of the rowtime attribute in the table schema. * @param timestampExtractor The {@link TimestampExtractor} to extract the rowtime attribute from the physical type. diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitModes.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitModes.java index 9e1d9d5bf17b3..85dc2632ae431 100644 --- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitModes.java +++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitModes.java @@ -29,7 +29,7 @@ public class OffsetCommitModes { * @param enableCommitOnCheckpoint whether or not committing on checkpoints is enabled. * @param enableCheckpointing whether or not checkpoint is enabled for the consumer. * - * @return the offset commmit mode to use, based on the configuration values. + * @return the offset commit mode to use, based on the configuration values. */ public static OffsetCommitMode fromConfiguration( boolean enableAutoCommit, diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java index da61dd0cd11d8..db3273369467b 100644 --- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java +++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java @@ -70,7 +70,7 @@ public ClosableBlockingQueue() { /** * Creates a new empty queue, reserving space for at least the specified number - * of elements. The queu can still grow, of more elements are added than the + * of elements. 
The queue can still grow, of more elements are added than the * reserved space. * * @param initialSize The number of elements to reserve space for. diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java index d462953d5db4e..ad118aefb958f 100644 --- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java +++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java @@ -189,7 +189,7 @@ public void testAsyncErrorRethrownOnCheckpoint() throws Throwable { * Test ensuring that if an async exception is caught for one of the flushed requests on checkpoint, * it should be rethrown; we set a timeout because the test will not finish if the logic is broken. * - *
<p>
Note that this test does not test the snapshot method is blocked correctly when there are pending recorrds. + *
<p>
Note that this test does not test the snapshot method is blocked correctly when there are pending records. * The test for that is covered in testAtLeastOnceProducer. */ @SuppressWarnings("unchecked") diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java index 0a5608a8d4c27..55a9c4d3b1492 100644 --- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java +++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java @@ -406,7 +406,7 @@ public void runStartFromLatestOffsets() throws Exception { final String consumeExtraRecordsJobName = "Consume Extra Records Job"; final String writeExtraRecordsJobName = "Write Extra Records Job"; - // seriliazation / deserialization schemas for writing and consuming the extra records + // serialization / deserialization schemas for writing and consuming the extra records final TypeInformation> resultType = TypeInformation.of(new TypeHint>() {}); diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java index 89e9f0416424c..7daaad2610c33 100644 --- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java +++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java @@ -58,7 +58,7 @@ *
<p>
NOTE: * In the AWS KCL library, there is a similar implementation - {@link com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisProxy}. * This implementation differs mainly in that we can make operations to arbitrary Kinesis streams, which is a needed - * functionality for the Flink Kinesis Connecter since the consumer may simultaneously read from multiple Kinesis streams. + * functionality for the Flink Kinesis Connector since the consumer may simultaneously read from multiple Kinesis streams. */ public class KinesisProxy implements KinesisProxyInterface { diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducerTest.java index 07c9cd7db7455..86cefff0c9c6a 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducerTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducerTest.java @@ -167,7 +167,7 @@ public void testAsyncErrorRethrownOnCheckpoint() throws Throwable { * Test ensuring that if an async exception is caught for one of the flushed requests on checkpoint, * it should be rethrown; we set a timeout because the test will not finish if the logic is broken. * - *
<p>
Note that this test does not test the snapshot method is blocked correctly when there are pending recorrds. + *
<p>
Note that this test does not test the snapshot method is blocked correctly when there are pending records. * The test for that is covered in testAtLeastOnceProducer. */ @SuppressWarnings("ResultOfMethodCallIgnored") diff --git a/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQConnectionConfig.java b/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQConnectionConfig.java index cce800a92e91f..bbb48ce339104 100644 --- a/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQConnectionConfig.java +++ b/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQConnectionConfig.java @@ -355,7 +355,7 @@ public Builder setPassword(String password) { /** * Convenience method for setting the fields in an AMQP URI: host, * port, username, password and virtual host. If any part of the - * URI is ommited, the ConnectionFactory's corresponding variable + * URI is omitted, the ConnectionFactory's corresponding variable * is left unchanged. * @param uri is the AMQP URI containing the data * @return the Builder diff --git a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapred/HadoopInputFormatBase.java b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapred/HadoopInputFormatBase.java index 5c26a58f9b89f..69ff2d1b466b4 100644 --- a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapred/HadoopInputFormatBase.java +++ b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapred/HadoopInputFormatBase.java @@ -53,7 +53,7 @@ * * @param Type of key * @param Type of value - * @param The type iself + * @param The type itself */ @Internal public abstract class HadoopInputFormatBase extends HadoopInputFormatCommonBase { diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopIOFormatsITCase.java b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopIOFormatsITCase.java index bbe639553ec20..46102a2cdc9c8 100644 --- a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopIOFormatsITCase.java +++ b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopIOFormatsITCase.java @@ -51,7 +51,7 @@ import java.util.LinkedList; /** - * Integraiton tests for Hadoop IO formats. + * Integration tests for Hadoop IO formats. */ @RunWith(Parameterized.class) public class HadoopIOFormatsITCase extends JavaProgramTestBase { diff --git a/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputFormat.java b/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputFormat.java index 52fd012264dfa..ebe25c819aece 100644 --- a/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputFormat.java +++ b/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputFormat.java @@ -51,7 +51,7 @@ public abstract class TableInputFormat extends AbstractTableInp * The output from HBase is always an instance of {@link Result}. 
* This method is to copy the data in the Result instance into the required {@link Tuple} * @param r The Result instance from HBase that needs to be converted - * @return The approriate instance of {@link Tuple} that contains the needed information. + * @return The appropriate instance of {@link Tuple} that contains the needed information. */ protected abstract T mapResultToTuple(Result r); diff --git a/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseConnectorITCase.java b/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseConnectorITCase.java index 3da4230b80ad4..1f643972b36f1 100644 --- a/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseConnectorITCase.java +++ b/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseConnectorITCase.java @@ -312,7 +312,7 @@ public long eval(byte[] bytes) { } } - // ######## TableInputFormate tests ############ + // ######## TableInputFormat tests ############ class InputFormatForTestTable extends TableInputFormat> { diff --git a/flink-contrib/docker-flink/create-docker-swarm-service.sh b/flink-contrib/docker-flink/create-docker-swarm-service.sh index 4393a7023226c..0a9cc16681b71 100755 --- a/flink-contrib/docker-flink/create-docker-swarm-service.sh +++ b/flink-contrib/docker-flink/create-docker-swarm-service.sh @@ -50,5 +50,5 @@ docker network create -d overlay ${OVERLAY_NETWORK_NAME} # Create the jobmanager service docker service create --name ${JOB_MANAGER_NAME} --env JOB_MANAGER_RPC_ADDRESS=${JOB_MANAGER_RPC_ADDRESS} -p ${SERVICE_PORT}:8081 --network ${OVERLAY_NETWORK_NAME} ${IMAGE_NAME} jobmanager -# Create the taskmanger service (scale this out as needed) +# Create the taskmanager service (scale this out as needed) docker service create --name ${TASK_MANAGER_NAME} --env JOB_MANAGER_RPC_ADDRESS=${JOB_MANAGER_RPC_ADDRESS} --network ${OVERLAY_NETWORK_NAME} ${IMAGE_NAME} taskmanager diff --git a/flink-contrib/flink-connector-wikiedits/src/test/java/org/apache/flink/streaming/connectors/wikiedits/WikipediaEditsSourceTest.java b/flink-contrib/flink-connector-wikiedits/src/test/java/org/apache/flink/streaming/connectors/wikiedits/WikipediaEditsSourceTest.java index f6fa8e0b53924..44bf8eef20412 100644 --- a/flink-contrib/flink-connector-wikiedits/src/test/java/org/apache/flink/streaming/connectors/wikiedits/WikipediaEditsSourceTest.java +++ b/flink-contrib/flink-connector-wikiedits/src/test/java/org/apache/flink/streaming/connectors/wikiedits/WikipediaEditsSourceTest.java @@ -71,7 +71,7 @@ public void testWikipediaEditsSource() throws Exception { // Execute the source in a different thread and collect events into the queue. 
// We do this in a separate thread in order to not block the main test thread - // indefinitely in case that somethign bad happens (like not receiving any + // indefinitely in case that something bad happens (like not receiving any // events) executorService.execute(() -> { try { diff --git a/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBStateBackend.java b/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBStateBackend.java index 6ec7ec836c04c..79771f3bfb8f4 100644 --- a/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBStateBackend.java +++ b/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBStateBackend.java @@ -82,7 +82,7 @@ public class RocksDBStateBackend extends AbstractStateBackend { /** The state backend that we use for creating checkpoint streams. */ private final AbstractStateBackend checkpointStreamBackend; - /** Operator identifier that is used to uniqueify the RocksDB storage path. */ + /** Operator identifier that is used to uniquify the RocksDB storage path. */ private String operatorIdentifier; /** JobID for uniquifying backup paths. */ @@ -202,7 +202,7 @@ public RocksDBStateBackend(AbstractStateBackend checkpointStreamBackend) { * {@link AbstractStateBackend#createStreamFactory(JobID, String) checkpoint stream}. * * @param checkpointStreamBackend The backend to store the - * @param enableIncrementalCheckpointing True if incremental checkponting is enabled + * @param enableIncrementalCheckpointing True if incremental checkpointing is enabled */ public RocksDBStateBackend(AbstractStateBackend checkpointStreamBackend, boolean enableIncrementalCheckpointing) { this.checkpointStreamBackend = requireNonNull(checkpointStreamBackend); diff --git a/flink-contrib/flink-storm-examples/src/main/java/org/apache/flink/storm/exclamation/ExclamationWithBolt.java b/flink-contrib/flink-storm-examples/src/main/java/org/apache/flink/storm/exclamation/ExclamationWithBolt.java index c31c36a427a80..b6bb4d594804d 100644 --- a/flink-contrib/flink-storm-examples/src/main/java/org/apache/flink/storm/exclamation/ExclamationWithBolt.java +++ b/flink-contrib/flink-storm-examples/src/main/java/org/apache/flink/storm/exclamation/ExclamationWithBolt.java @@ -36,7 +36,7 @@ *
<p>
The input is a plain text file with lines separated by newline characters. * *
<p>
Usage: - * ExclamationWithmBolt <text path> <result path> <number of exclamation marks>
+ * ExclamationWithBolt <text path> <result path> <number of exclamation marks>
* If no parameters are provided, the program is run with default data from {@link WordCountData} with x=2. * *
<p>
This example shows how to: diff --git a/flink-contrib/flink-storm-examples/src/test/java/org/apache/flink/storm/exclamation/ExclamationWithSpoutITCase.java b/flink-contrib/flink-storm-examples/src/test/java/org/apache/flink/storm/exclamation/ExclamationWithSpoutITCase.java index 61310e897e85b..64294d1dd0263 100644 --- a/flink-contrib/flink-storm-examples/src/test/java/org/apache/flink/storm/exclamation/ExclamationWithSpoutITCase.java +++ b/flink-contrib/flink-storm-examples/src/test/java/org/apache/flink/storm/exclamation/ExclamationWithSpoutITCase.java @@ -23,7 +23,7 @@ import org.apache.flink.test.testdata.WordCountData; /** - * Test for the ExclamantionWithSpout example. + * Test for the ExclamationWithSpout example. */ public class ExclamationWithSpoutITCase extends StreamingProgramTestBase { diff --git a/flink-contrib/flink-storm-examples/src/test/java/org/apache/flink/storm/tests/StormFieldsGroupingITCase.java b/flink-contrib/flink-storm-examples/src/test/java/org/apache/flink/storm/tests/StormFieldsGroupingITCase.java index c861c9e2587a1..6e02d81359d0b 100644 --- a/flink-contrib/flink-storm-examples/src/test/java/org/apache/flink/storm/tests/StormFieldsGroupingITCase.java +++ b/flink-contrib/flink-storm-examples/src/test/java/org/apache/flink/storm/tests/StormFieldsGroupingITCase.java @@ -77,7 +77,7 @@ protected void postSubmit() throws Exception { Collections.sort(expectedResults); System.out.println(actualResults); for (int i = 0; i < actualResults.size(); ++i) { - //compare against actual results with removed prefex (as it depends e.g. on the hash function used) + //compare against actual results with removed prefix (as it depends e.g. on the hash function used) Assert.assertEquals(expectedResults.get(i), actualResults.get(i)); } } diff --git a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/api/FlinkClient.java b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/api/FlinkClient.java index d53ca42fc4729..24cb0fc469135 100644 --- a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/api/FlinkClient.java +++ b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/api/FlinkClient.java @@ -156,7 +156,7 @@ public FlinkClient getClient() { return this; } - // The following methods are derived from "backtype.storm.generated.Nimubs.Client" + // The following methods are derived from "backtype.storm.generated.Nimbus.Client" /** * Parameter {@code uploadedJarLocation} is actually used to point to the local jar, because Flink does not support diff --git a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/BoltWrapper.java b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/BoltWrapper.java index 590faf382dbe3..ba2435e94d332 100644 --- a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/BoltWrapper.java +++ b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/BoltWrapper.java @@ -75,7 +75,7 @@ public class BoltWrapper extends AbstractStreamOperator implements /** The IDs of the input streams for this bolt per producer task ID. */ private final HashMap inputStreamIds = new HashMap(); - /** The IDs of the producres for this bolt per producer task ID.. */ + /** The IDs of the producers for this bolt per producer task ID.. */ private final HashMap inputComponentIds = new HashMap(); /** The schema (ie, ordered field names) of the input streams per producer taskID. 
*/ private final HashMap inputSchemas = new HashMap(); @@ -131,8 +131,8 @@ public BoltWrapper(final IRichBolt bolt, final Fields inputSchema) * Contains stream names if a single attribute output stream, should not be of type {@link Tuple1} but be * of a raw type. * @throws IllegalArgumentException - * If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if - * {@code rawOuput} is {@code false} and the number of declared output attributes is not within range + * If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if + * {@code rawOutput} is {@code false} and the number of declared output attributes is not within range * [1;25]. */ public BoltWrapper(final IRichBolt bolt, final String[] rawOutputs) @@ -153,8 +153,8 @@ public BoltWrapper(final IRichBolt bolt, final String[] rawOutputs) * Contains stream names if a single attribute output stream, should not be of type {@link Tuple1} but be * of a raw type. * @throws IllegalArgumentException - * If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if - * {@code rawOuput} is {@code false} and the number of declared output attributes is not with range + * If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if + * {@code rawOutput} is {@code false} and the number of declared output attributes is not with range * [1;25]. */ public BoltWrapper(final IRichBolt bolt, final Collection rawOutputs) throws IllegalArgumentException { @@ -176,8 +176,8 @@ public BoltWrapper(final IRichBolt bolt, final Collection rawOutputs) th * Contains stream names if a single attribute output stream, should not be of type {@link Tuple1} but be * of a raw type. * @throws IllegalArgumentException - * If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if - * {@code rawOuput} is {@code false} and the number of declared output attributes is not with range + * If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if + * {@code rawOutput} is {@code false} and the number of declared output attributes is not with range * [0;25]. */ public BoltWrapper( @@ -199,14 +199,14 @@ public BoltWrapper( * The Storm {@link IRichBolt bolt} to be used. * @param inputSchema * The schema (ie, ordered field names) of the input stream. @throws IllegalArgumentException If - * {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if - * {@code rawOuput} is {@code false} and the number of declared output attributes is not with range + * {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if + * {@code rawOutput} is {@code false} and the number of declared output attributes is not with range * @param rawOutputs * Contains stream names if a single attribute output stream, should not be of type {@link Tuple1} but be * of a raw type. * @throws IllegalArgumentException - * If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if - * {@code rawOuput} is {@code false} and the number of declared output attributes is not with range + * If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if + * {@code rawOutput} is {@code false} and the number of declared output attributes is not with range * [0;25]. 
*/ public BoltWrapper(final IRichBolt bolt, final Fields inputSchema, @@ -229,8 +229,8 @@ public BoltWrapper(final IRichBolt bolt, final Fields inputSchema, * Contains stream names if a single attribute output stream, should not be of type {@link Tuple1} but be * of a raw type. * @throws IllegalArgumentException - * If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if - * {@code rawOuput} is {@code false} and the number of declared output attributes is not with range + * If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if + * {@code rawOutput} is {@code false} and the number of declared output attributes is not with range * [0;25]. */ public BoltWrapper(final IRichBolt bolt, final String name, final String inputStreamId, diff --git a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/MergedInputsBoltWrapper.java b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/MergedInputsBoltWrapper.java index 07abffc80937c..88ae35521fa7d 100644 --- a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/MergedInputsBoltWrapper.java +++ b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/MergedInputsBoltWrapper.java @@ -63,8 +63,8 @@ public MergedInputsBoltWrapper(final IRichBolt bolt) throws IllegalArgumentExcep * Contains stream names if a single attribute output stream, should not be of type {@link Tuple1} but be * of a raw type. * @throws IllegalArgumentException - * If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if - * {@code rawOuput} is {@code false} and the number of declared output attributes is not within range + * If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if + * {@code rawOutput} is {@code false} and the number of declared output attributes is not within range * [1;25]. */ public MergedInputsBoltWrapper(final IRichBolt bolt, final String[] rawOutputs) @@ -85,8 +85,8 @@ public MergedInputsBoltWrapper(final IRichBolt bolt, final String[] rawOutputs) * Contains stream names if a single attribute output stream, should not be of type {@link Tuple1} but be * of a raw type. * @throws IllegalArgumentException - * If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if - * {@code rawOuput} is {@code false} and the number of declared output attributes is not with range + * If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if + * {@code rawOutput} is {@code false} and the number of declared output attributes is not with range * [1;25]. */ public MergedInputsBoltWrapper(final IRichBolt bolt, final Collection rawOutputs) @@ -109,8 +109,8 @@ public MergedInputsBoltWrapper(final IRichBolt bolt, final Collection ra * Contains stream names if a single attribute output stream, should not be of type {@link Tuple1} but be * of a raw type. * @throws IllegalArgumentException - * If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if - * {@code rawOuput} is {@code false} and the number of declared output attributes is not with range + * If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if + * {@code rawOutput} is {@code false} and the number of declared output attributes is not with range * [0;25]. 
*/ public MergedInputsBoltWrapper(final IRichBolt bolt, final String name, final Collection rawOutputs) diff --git a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/SpoutWrapper.java b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/SpoutWrapper.java index 6d37b293fb850..882ba273fef6a 100644 --- a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/SpoutWrapper.java +++ b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/SpoutWrapper.java @@ -115,8 +115,8 @@ public SpoutWrapper(final IRichSpout spout, final Integer numberOfInvocations) * Contains stream names if a single attribute output stream, should not be of type {@link Tuple1} but be * of a raw type. (Can be {@code null}.) * @throws IllegalArgumentException - * If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if - * {@code rawOuput} is {@code false} and the number of declared output attributes is not with range + * If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if + * {@code rawOutput} is {@code false} and the number of declared output attributes is not with range * [0;25]. */ public SpoutWrapper(final IRichSpout spout, final String[] rawOutputs) @@ -141,8 +141,8 @@ public SpoutWrapper(final IRichSpout spout, final String[] rawOutputs) * terminates if no tuple was emitted for the first time. If value is {@code null}, finite invocation is * disabled. * @throws IllegalArgumentException - * If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if - * {@code rawOuput} is {@code false} and the number of declared output attributes is not with range + * If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if + * {@code rawOutput} is {@code false} and the number of declared output attributes is not with range * [0;25]. */ public SpoutWrapper(final IRichSpout spout, final String[] rawOutputs, @@ -163,8 +163,8 @@ public SpoutWrapper(final IRichSpout spout, final String[] rawOutputs, * Contains stream names if a single attribute output stream, should not be of type {@link Tuple1} but be * of a raw type. (Can be {@code null}.) * @throws IllegalArgumentException - * If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if - * {@code rawOuput} is {@code false} and the number of declared output attributes is not with range + * If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if + * {@code rawOutput} is {@code false} and the number of declared output attributes is not with range * [0;25]. */ public SpoutWrapper(final IRichSpout spout, final Collection rawOutputs) @@ -189,8 +189,8 @@ public SpoutWrapper(final IRichSpout spout, final Collection rawOutputs) * terminates if no tuple was emitted for the first time. If value is {@code null}, finite invocation is * disabled. * @throws IllegalArgumentException - * If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if - * {@code rawOuput} is {@code false} and the number of declared output attributes is not with range + * If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if + * {@code rawOutput} is {@code false} and the number of declared output attributes is not with range * [0;25]. 
*/ public SpoutWrapper(final IRichSpout spout, final Collection rawOutputs, @@ -217,8 +217,8 @@ public SpoutWrapper(final IRichSpout spout, final Collection rawOutputs, * terminates if no tuple was emitted for the first time. If value is {@code null}, finite invocation is * disabled. * @throws IllegalArgumentException - * If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if - * {@code rawOuput} is {@code false} and the number of declared output attributes is not with range + * If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if + * {@code rawOutput} is {@code false} and the number of declared output attributes is not with range * [0;25]. */ public SpoutWrapper(final IRichSpout spout, final String name, final Collection rawOutputs, diff --git a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/WrapperSetupHelper.java b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/WrapperSetupHelper.java index 16112114c1856..1d3a544a830ca 100644 --- a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/WrapperSetupHelper.java +++ b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/WrapperSetupHelper.java @@ -62,8 +62,8 @@ class WrapperSetupHelper { * {@link org.apache.flink.api.java.tuple.Tuple1 Tuple1} but be of a raw type. (Can be {@code null}.) * @return The number of attributes to be used for each stream. * @throws IllegalArgumentException - * If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if - * {@code rawOuput} is {@code false} and the number of declared output attributes is not with range + * If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if + * {@code rawOutput} is {@code false} and the number of declared output attributes is not with range * [0;25]. */ static HashMap getNumberOfAttributes(final IComponent spoutOrBolt, diff --git a/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java b/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java index 9f39c468d36aa..fb888c07f27d1 100644 --- a/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java +++ b/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java @@ -205,7 +205,7 @@ public boolean isClosureCleanerEnabled() { } /** - * Sets the interval of the automatic watermark emission. Watermaks are used throughout + * Sets the interval of the automatic watermark emission. Watermarks are used throughout * the streaming system to keep track of the progress of time. They are used, for example, * for time based windowing. * diff --git a/flink-core/src/main/java/org/apache/flink/api/common/io/DelimitedInputFormat.java b/flink-core/src/main/java/org/apache/flink/api/common/io/DelimitedInputFormat.java index 1d344b9f3a5a6..04b04b86e929f 100644 --- a/flink-core/src/main/java/org/apache/flink/api/common/io/DelimitedInputFormat.java +++ b/flink-core/src/main/java/org/apache/flink/api/common/io/DelimitedInputFormat.java @@ -588,7 +588,7 @@ protected final boolean readLine() throws IOException { int startPos = this.readPos - delimPos; int count; - // Search for next occurence of delimiter in read buffer. + // Search for next occurrence of delimiter in read buffer. 
while (this.readPos < this.limit && delimPos < this.delimiter.length) { if ((this.readBuffer[this.readPos]) == this.delimiter[delimPos]) { // Found the expected delimiter character. Continue looking for the next character of delimiter. diff --git a/flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java b/flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java index f43bd22aa9d37..038a3c381b3c0 100644 --- a/flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java +++ b/flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java @@ -256,7 +256,7 @@ public void setFilePath(String filePath) { // paths) to compute the preview graph. The following is a workaround for // this situation and we should fix this. - // comment (Stephan Ewen) this should be no longer relevant with the current Java/Scalal APIs. + // comment (Stephan Ewen) this should be no longer relevant with the current Java/Scala APIs. if (filePath.isEmpty()) { setFilePath(new Path()); return; diff --git a/flink-core/src/main/java/org/apache/flink/api/common/state/ListStateDescriptor.java b/flink-core/src/main/java/org/apache/flink/api/common/state/ListStateDescriptor.java index a50e25df9505a..e59d6ee832d57 100644 --- a/flink-core/src/main/java/org/apache/flink/api/common/state/ListStateDescriptor.java +++ b/flink-core/src/main/java/org/apache/flink/api/common/state/ListStateDescriptor.java @@ -31,7 +31,7 @@ * is a list that can be appended and iterated over. * *
<p>
Using {@code ListState} is typically more efficient than manually maintaining a list in a - * {@link ValueState}, because the backing implementation can support efficient appends, rathern then + * {@link ValueState}, because the backing implementation can support efficient appends, rather than * replacing the full list on write. * *
<p>
To create keyed list state (on a KeyedStream), use diff --git a/flink-core/src/main/java/org/apache/flink/api/common/state/MapStateDescriptor.java b/flink-core/src/main/java/org/apache/flink/api/common/state/MapStateDescriptor.java index d4a49f8877f06..16c00cb439430 100644 --- a/flink-core/src/main/java/org/apache/flink/api/common/state/MapStateDescriptor.java +++ b/flink-core/src/main/java/org/apache/flink/api/common/state/MapStateDescriptor.java @@ -54,7 +54,7 @@ public MapStateDescriptor(String name, TypeSerializer keySerializer, TypeSer } /** - * Create a new {@code MapStateDescriptor} with the given name and the given type informations. + * Create a new {@code MapStateDescriptor} with the given name and the given type information. * * @param name The name of the {@code MapStateDescriptor}. * @param keyTypeInfo The type information for the keys in the state. diff --git a/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializerSerializationUtil.java b/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializerSerializationUtil.java index c6291adbb55ff..c68bb794296e0 100644 --- a/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializerSerializationUtil.java +++ b/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializerSerializationUtil.java @@ -76,7 +76,7 @@ public class TypeSerializerSerializationUtil { /** * An {@link ObjectInputStream} that ignores serialVersionUID mismatches when deserializing objects of - * anonymous classes or our Scala serializer classes and also replaces occurences of GenericData.Array + * anonymous classes or our Scala serializer classes and also replaces occurrences of GenericData.Array * (from Avro) by a dummy class so that the KryoSerializer can still be deserialized without * Avro being on the classpath. * diff --git a/flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/CharComparator.java b/flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/CharComparator.java index f9e186a434590..3e36b4cca5cde 100644 --- a/flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/CharComparator.java +++ b/flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/CharComparator.java @@ -60,7 +60,7 @@ public boolean isNormalizedKeyPrefixOnly(int keyBytes) { @Override public void putNormalizedKey(Character value, MemorySegment target, int offset, int numBytes) { // note that the char is an unsigned data type in java and consequently needs - // no code that transforms the signed representation to an offsetted representation + // no code that transforms the signed representation to an offset representation // that is equivalent to unsigned, when compared byte by byte if (numBytes == 2) { // default case, full normalized key diff --git a/flink-core/src/main/java/org/apache/flink/api/java/functions/KeySelector.java b/flink-core/src/main/java/org/apache/flink/api/java/functions/KeySelector.java index 63e76a93e0f04..4aa84693318c2 100644 --- a/flink-core/src/main/java/org/apache/flink/api/java/functions/KeySelector.java +++ b/flink-core/src/main/java/org/apache/flink/api/java/functions/KeySelector.java @@ -25,7 +25,7 @@ /** * The {@link KeySelector} allows to use deterministic objects for operations such as - * reduce, reduceGroup, join, coGoup, etc. If invoked multiple times on the same object, + * reduce, reduceGroup, join, coGroup, etc. If invoked multiple times on the same object, * the returned key must be the same. 
* * The extractor takes an object and returns the deterministic key for that object. diff --git a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java index 4767838447f45..d8f07124ed4b6 100644 --- a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java +++ b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java @@ -481,7 +481,7 @@ public static TypeInformation getPartitionerTypes( final int paramLen = exec.getParameterTypes().length; final Method sam = TypeExtractionUtils.getSingleAbstractMethod(Partitioner.class); - // number of parameters the SAM of implemented interface has, the parameter indexing aplicates to this range + // number of parameters the SAM of implemented interface has; the parameter indexing applies to this range final int baseParametersLen = sam.getParameterTypes().length; final Type keyType = TypeExtractionUtils.extractTypeFromLambda( @@ -581,7 +581,7 @@ public static TypeInformation getUnaryOperatorReturnType( final Method sam = TypeExtractionUtils.getSingleAbstractMethod(baseClass); - // number of parameters the SAM of implemented interface has, the parameter indexing aplicates to this range + // number of parameters the SAM of implemented interface has; the parameter indexing applies to this range final int baseParametersLen = sam.getParameterTypes().length; // executable references "this" implicitly diff --git a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/KryoRegistrationSerializerConfigSnapshot.java b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/KryoRegistrationSerializerConfigSnapshot.java index cdf6b23b1301c..ad003cc1f579f 100644 --- a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/KryoRegistrationSerializerConfigSnapshot.java +++ b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/KryoRegistrationSerializerConfigSnapshot.java @@ -18,10 +18,6 @@ package org.apache.flink.api.java.typeutils.runtime; -import com.esotericsoftware.kryo.Kryo; -import com.esotericsoftware.kryo.Serializer; -import com.esotericsoftware.kryo.io.Input; -import com.esotericsoftware.kryo.io.Output; import org.apache.flink.annotation.Internal; import org.apache.flink.api.common.ExecutionConfig; import org.apache.flink.api.common.typeutils.GenericTypeSerializerConfigSnapshot; @@ -31,6 +27,11 @@ import org.apache.flink.core.memory.DataOutputView; import org.apache.flink.util.InstantiationUtil; import org.apache.flink.util.Preconditions; + +import com.esotericsoftware.kryo.Kryo; +import com.esotericsoftware.kryo.Serializer; +import com.esotericsoftware.kryo.io.Input; +import com.esotericsoftware.kryo.io.Output; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -220,7 +221,7 @@ public void read(DataInputView in) throws IOException { public static class DummyRegisteredClass implements Serializable {} /** - * Placeholder dummmy for a previously registered Kryo serializer that is no longer valid or in classpath on restore. + * Placeholder dummy for a previously registered Kryo serializer that is no longer valid or in classpath on restore. 
*/ public static class DummyKryoSerializerClass extends Serializer implements Serializable { diff --git a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoComparator.java b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoComparator.java index a3f4280c9b9a0..ece790e5e9efa 100644 --- a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoComparator.java +++ b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoComparator.java @@ -86,7 +86,7 @@ public PojoComparator(Field[] keyFields, TypeComparator[] comparators, TypeSe inverted = k.invertNormalizedKey(); } else if (k.invertNormalizedKey() != inverted) { - // if a successor does not agree on the invertion direction, it cannot be part of the normalized key + // if a successor does not agree on the inversion direction, it cannot be part of the normalized key break; } diff --git a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializer.java b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializer.java index 560e5b1faf935..f60ce460e6d76 100644 --- a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializer.java +++ b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializer.java @@ -309,7 +309,7 @@ public boolean canEqual(Object obj) { // -------------------------------------------------------------------------------------------- /** - * Returns the Chill Kryo Serializer which is implictly added to the classpath via flink-runtime. + * Returns the Chill Kryo Serializer which is implicitly added to the classpath via flink-runtime. * Falls back to the default Kryo serializer if it can't be found. * @return The Kryo serializer instance. */ diff --git a/flink-core/src/main/java/org/apache/flink/configuration/ConfigConstants.java b/flink-core/src/main/java/org/apache/flink/configuration/ConfigConstants.java index 50039ac04e2d8..545765f3bb272 100644 --- a/flink-core/src/main/java/org/apache/flink/configuration/ConfigConstants.java +++ b/flink-core/src/main/java/org/apache/flink/configuration/ConfigConstants.java @@ -484,7 +484,7 @@ public final class ConfigConstants { public static final String YARN_TASK_MANAGER_ENV_PREFIX = "yarn.taskmanager.env."; /** - * Template for the YARN container start incovation. + * Template for the YARN container start invocation. */ public static final String YARN_CONTAINER_START_COMMAND_TEMPLATE = "yarn.container-start-command-template"; @@ -594,7 +594,7 @@ public final class ConfigConstants { // ------------------------ Hadoop Configuration ------------------------ /** - * Path to hdfs-defaul.xml file + * Path to hdfs-default.xml file * * @deprecated Use environment variable HADOOP_CONF_DIR instead. 
*/ @@ -980,7 +980,7 @@ public final class ConfigConstants { // --------------------------- High Availability -------------------------- - /** Defines high availabilty mode used for the cluster execution ("NONE", "ZOOKEEPER") */ + /** Defines high availability mode used for the cluster execution ("NONE", "ZOOKEEPER") */ @PublicEvolving public static final String HA_MODE = "high-availability"; diff --git a/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalDataInputStream.java b/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalDataInputStream.java index 63017e385ad65..ab70b825a4c7d 100644 --- a/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalDataInputStream.java +++ b/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalDataInputStream.java @@ -75,7 +75,7 @@ public int read(@Nonnull byte[] buffer, int offset, int length) throws IOExcepti @Override public void close() throws IOException { - // Accoring to javadoc, this also closes the channel + // According to javadoc, this also closes the channel this.fis.close(); } diff --git a/flink-core/src/main/java/org/apache/flink/types/CharValue.java b/flink-core/src/main/java/org/apache/flink/types/CharValue.java index f800832a848bf..a815b77c89fcc 100644 --- a/flink-core/src/main/java/org/apache/flink/types/CharValue.java +++ b/flink-core/src/main/java/org/apache/flink/types/CharValue.java @@ -124,7 +124,7 @@ public int getMaxNormalizedKeyLen() { @Override public void copyNormalizedKey(MemorySegment target, int offset, int len) { // note that the char is an unsigned data type in java and consequently needs - // no code that transforms the signed representation to an offsetted representation + // no code that transforms the signed representation to an offset representation // that is equivalent to unsigned, when compared byte by byte if (len == 2) { // default case, full normalized key diff --git a/flink-core/src/main/java/org/apache/flink/types/IntValue.java b/flink-core/src/main/java/org/apache/flink/types/IntValue.java index 347fd1d07232f..bd0b39d6d507e 100644 --- a/flink-core/src/main/java/org/apache/flink/types/IntValue.java +++ b/flink-core/src/main/java/org/apache/flink/types/IntValue.java @@ -123,7 +123,7 @@ public int getMaxNormalizedKeyLen() { @Override public void copyNormalizedKey(MemorySegment target, int offset, int len) { - // take out value and add the integer min value. This gets an offsetted + // take out value and add the integer min value. This gets an offset // representation when interpreted as an unsigned integer (as is the case // with normalized keys). write this value as big endian to ensure the // most significant byte comes first. diff --git a/flink-core/src/main/java/org/apache/flink/util/StringBasedID.java b/flink-core/src/main/java/org/apache/flink/util/StringBasedID.java index 7245e612090fe..fe03fa9946514 100644 --- a/flink-core/src/main/java/org/apache/flink/util/StringBasedID.java +++ b/flink-core/src/main/java/org/apache/flink/util/StringBasedID.java @@ -34,7 +34,7 @@ public class StringBasedID implements Serializable { private final String keyString; /** - * Protected constructor to enfore that subclassing. + * Protected constructor to enforce that subclassing. 
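The IntValue and CharValue hunks above refer to Flink's normalized-key encoding: adding Integer.MIN_VALUE to a signed int and writing the result big endian yields bytes whose unsigned, byte-wise comparison agrees with the numeric order. A standalone sketch of that idea follows; the class and method names are illustrative and are not Flink API.

public class NormalizedIntKey {

    // Add Integer.MIN_VALUE (flips the sign bit) and emit big endian, so that
    // unsigned byte-wise comparison preserves the original int order.
    static byte[] normalizedKey(int value) {
        int biased = value + Integer.MIN_VALUE;
        return new byte[] {
            (byte) (biased >>> 24), (byte) (biased >>> 16),
            (byte) (biased >>> 8), (byte) biased
        };
    }

    static int compareUnsigned(byte[] a, byte[] b) {
        for (int i = 0; i < a.length; i++) {
            int cmp = (a[i] & 0xFF) - (b[i] & 0xFF);
            if (cmp != 0) {
                return cmp;
            }
        }
        return 0;
    }

    public static void main(String[] args) {
        int[] ordered = {Integer.MIN_VALUE, -7, -1, 0, 1, 42, Integer.MAX_VALUE};
        for (int i = 0; i < ordered.length - 1; i++) {
            byte[] lower = normalizedKey(ordered[i]);
            byte[] higher = normalizedKey(ordered[i + 1]);
            // Prints true for every adjacent pair: byte order matches numeric order.
            System.out.println(ordered[i] + " < " + ordered[i + 1]
                + " ? " + (compareUnsigned(lower, higher) < 0));
        }
    }
}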
*/ protected StringBasedID(String keyString) { this.keyString = Preconditions.checkNotNull(keyString); diff --git a/flink-core/src/test/java/org/apache/flink/configuration/MemorySizeTest.java b/flink-core/src/test/java/org/apache/flink/configuration/MemorySizeTest.java index dbdd96b91594f..9df541ed8e449 100644 --- a/flink-core/src/test/java/org/apache/flink/configuration/MemorySizeTest.java +++ b/flink-core/src/test/java/org/apache/flink/configuration/MemorySizeTest.java @@ -161,7 +161,7 @@ public void testParseInvalid() { fail("exception expected"); } catch (IllegalArgumentException ignored) {} - // brank + // blank try { MemorySize.parseBytes(" "); fail("exception expected"); @@ -185,7 +185,7 @@ public void testParseInvalid() { fail("exception expected"); } catch (IllegalArgumentException ignored) {} - // negavive number + // negative number try { MemorySize.parseBytes("-100 bytes"); fail("exception expected"); diff --git a/flink-filesystems/flink-s3-fs-hadoop/src/main/java/org/apache/hadoop/conf/Configuration.java b/flink-filesystems/flink-s3-fs-hadoop/src/main/java/org/apache/hadoop/conf/Configuration.java index 16f162ddbfe30..86822de426ddb 100644 --- a/flink-filesystems/flink-s3-fs-hadoop/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/flink-filesystems/flink-s3-fs-hadoop/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -19,6 +19,43 @@ package org.apache.hadoop.conf; import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Charsets; +import com.google.common.base.Preconditions; +import org.apache.commons.collections.map.UnmodifiableMap; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.alias.CredentialProvider; +import org.apache.hadoop.security.alias.CredentialProvider.CredentialEntry; +import org.apache.hadoop.security.alias.CredentialProviderFactory; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringInterner; +import org.apache.hadoop.util.StringUtils; +import org.codehaus.jackson.JsonFactory; +import org.codehaus.jackson.JsonGenerator; +import org.w3c.dom.DOMException; +import org.w3c.dom.Document; +import org.w3c.dom.Element; +import org.w3c.dom.Node; +import org.w3c.dom.NodeList; +import org.w3c.dom.Text; +import org.xml.sax.SAXException; + +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.ParserConfigurationException; +import javax.xml.transform.Transformer; +import javax.xml.transform.TransformerException; +import javax.xml.transform.TransformerFactory; +import javax.xml.transform.dom.DOMSource; +import javax.xml.transform.stream.StreamResult; import java.io.BufferedInputStream; import java.io.DataInput; @@ -56,51 +93,12 @@ import java.util.WeakHashMap; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import java.util.regex.PatternSyntaxException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import 
java.util.concurrent.atomic.AtomicReference; - -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; -import javax.xml.parsers.ParserConfigurationException; -import javax.xml.transform.Transformer; -import javax.xml.transform.TransformerException; -import javax.xml.transform.TransformerFactory; -import javax.xml.transform.dom.DOMSource; -import javax.xml.transform.stream.StreamResult; - -import com.google.common.base.Charsets; -import org.apache.commons.collections.map.UnmodifiableMap; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.io.WritableUtils; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.alias.CredentialProvider; -import org.apache.hadoop.security.alias.CredentialProvider.CredentialEntry; -import org.apache.hadoop.security.alias.CredentialProviderFactory; -import org.apache.hadoop.util.ReflectionUtils; -import org.apache.hadoop.util.StringInterner; -import org.apache.hadoop.util.StringUtils; -import org.codehaus.jackson.JsonFactory; -import org.codehaus.jackson.JsonGenerator; -import org.w3c.dom.DOMException; -import org.w3c.dom.Document; -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; -import org.w3c.dom.Text; -import org.xml.sax.SAXException; - -import com.google.common.base.Preconditions; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.regex.PatternSyntaxException; /** * Provides access to configuration parameters. @@ -1937,7 +1935,7 @@ public void setStrings(String name, String... values) { * Get the value for a known password configuration element. * In order to enable the elimination of clear text passwords in config, * this method attempts to resolve the property name as an alias through - * the CredentialProvider API and conditionally fallsback to config. + * the CredentialProvider API and conditionally falls back to config. * @param name property name * @return password */ diff --git a/flink-java/src/test/java/org/apache/flink/api/java/io/CsvInputFormatTest.java b/flink-java/src/test/java/org/apache/flink/api/java/io/CsvInputFormatTest.java index 8939c5a8ba07c..99c569cc0c2f7 100644 --- a/flink-java/src/test/java/org/apache/flink/api/java/io/CsvInputFormatTest.java +++ b/flink-java/src/test/java/org/apache/flink/api/java/io/CsvInputFormatTest.java @@ -808,7 +808,7 @@ public void testParseStringErrors() throws Exception { } - // Test disabled becase we do not support double-quote escaped quotes right now. + // Test disabled because we do not support double-quote escaped quotes right now. 
// @Test public void testParserCorrectness() throws Exception { // RFC 4180 Compliance Test content @@ -875,13 +875,13 @@ private FileInputSplit createTempFile(String content) throws IOException { @Test public void testWindowsLineEndRemoval() { - //Check typical use case -- linux file is correct and it is set up to linuc(\n) + //Check typical use case -- linux file is correct and it is set up to linux (\n) this.testRemovingTrailingCR("\n", "\n"); //Check typical windows case -- windows file endings and file has windows file endings set up this.testRemovingTrailingCR("\r\n", "\r\n"); - //Check problematic case windows file -- windows file endings(\r\n) but linux line endings (\n) set up + //Check problematic case windows file -- windows file endings (\r\n) but linux line endings (\n) set up this.testRemovingTrailingCR("\r\n", "\n"); //Check problematic case linux file -- linux file endings (\n) but windows file endings set up (\r\n) diff --git a/flink-java/src/test/java/org/apache/flink/api/java/operator/MaxByOperatorTest.java b/flink-java/src/test/java/org/apache/flink/api/java/operator/MaxByOperatorTest.java index b207e19c1222c..5d00aa202ed40 100644 --- a/flink-java/src/test/java/org/apache/flink/api/java/operator/MaxByOperatorTest.java +++ b/flink-java/src/test/java/org/apache/flink/api/java/operator/MaxByOperatorTest.java @@ -69,7 +69,7 @@ public void testMaxByKeyFieldsDataset() { private final List customTypeData = new ArrayList(); /** - * This test validates that an InvalidProgrammException is thrown when maxBy + * This test validates that an InvalidProgramException is thrown when maxBy * is used on a custom data type. */ @Test(expected = InvalidProgramException.class) @@ -86,7 +86,7 @@ public void testCustomKeyFieldsDataset() { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. */ @Test(expected = IndexOutOfBoundsException.class) public void testOutOfTupleBoundsDataset1() { @@ -100,7 +100,7 @@ public void testOutOfTupleBoundsDataset1() { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. */ @Test(expected = IndexOutOfBoundsException.class) public void testOutOfTupleBoundsDataset2() { @@ -114,7 +114,7 @@ public void testOutOfTupleBoundsDataset2() { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. */ @Test(expected = IndexOutOfBoundsException.class) public void testOutOfTupleBoundsDataset3() { @@ -147,7 +147,7 @@ public void testMaxByKeyFieldsGrouping() { } /** - * This test validates that an InvalidProgrammException is thrown when maxBy + * This test validates that an InvalidProgramException is thrown when maxBy * is used on a custom data type. */ @Test(expected = InvalidProgramException.class) @@ -164,7 +164,7 @@ public void testCustomKeyFieldsGrouping() { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. */ @Test(expected = IndexOutOfBoundsException.class) public void testOutOfTupleBoundsGrouping1() { @@ -178,7 +178,7 @@ public void testOutOfTupleBoundsGrouping1() { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. 
*/ @Test(expected = IndexOutOfBoundsException.class) public void testOutOfTupleBoundsGrouping2() { @@ -192,7 +192,7 @@ public void testOutOfTupleBoundsGrouping2() { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. */ @Test(expected = IndexOutOfBoundsException.class) public void testOutOfTupleBoundsGrouping3() { diff --git a/flink-java/src/test/java/org/apache/flink/api/java/operator/MinByOperatorTest.java b/flink-java/src/test/java/org/apache/flink/api/java/operator/MinByOperatorTest.java index 02b84fab50f9c..4a77f9187a5f5 100644 --- a/flink-java/src/test/java/org/apache/flink/api/java/operator/MinByOperatorTest.java +++ b/flink-java/src/test/java/org/apache/flink/api/java/operator/MinByOperatorTest.java @@ -69,7 +69,7 @@ public void testMinByKeyFieldsDataset() { private final List customTypeData = new ArrayList(); /** - * This test validates that an InvalidProgrammException is thrown when minBy + * This test validates that an InvalidProgramException is thrown when minBy * is used on a custom data type. */ @Test(expected = InvalidProgramException.class) @@ -86,7 +86,7 @@ public void testCustomKeyFieldsDataset() { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. */ @Test(expected = IndexOutOfBoundsException.class) public void testOutOfTupleBoundsDataset1() { @@ -100,7 +100,7 @@ public void testOutOfTupleBoundsDataset1() { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. */ @Test(expected = IndexOutOfBoundsException.class) public void testOutOfTupleBoundsDataset2() { @@ -114,7 +114,7 @@ public void testOutOfTupleBoundsDataset2() { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. */ @Test(expected = IndexOutOfBoundsException.class) public void testOutOfTupleBoundsDataset3() { @@ -147,7 +147,7 @@ public void testMinByKeyFieldsGrouping() { } /** - * This test validates that an InvalidProgrammException is thrown when minBy + * This test validates that an InvalidProgramException is thrown when minBy * is used on a custom data type. */ @Test(expected = InvalidProgramException.class) @@ -164,7 +164,7 @@ public void testCustomKeyFieldsGrouping() { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. */ @Test(expected = IndexOutOfBoundsException.class) public void testOutOfTupleBoundsGrouping1() { @@ -178,7 +178,7 @@ public void testOutOfTupleBoundsGrouping1() { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. */ @Test(expected = IndexOutOfBoundsException.class) public void testOutOfTupleBoundsGrouping2() { @@ -192,7 +192,7 @@ public void testOutOfTupleBoundsGrouping2() { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. 
*/ @Test(expected = IndexOutOfBoundsException.class) public void testOutOfTupleBoundsGrouping3() { diff --git a/flink-java8/src/main/java/org/apache/flink/examples/java8/wordcount/WordCount.java b/flink-java8/src/main/java/org/apache/flink/examples/java8/wordcount/WordCount.java index 0130dec499873..8f36f662e51e5 100644 --- a/flink-java8/src/main/java/org/apache/flink/examples/java8/wordcount/WordCount.java +++ b/flink-java8/src/main/java/org/apache/flink/examples/java8/wordcount/WordCount.java @@ -62,7 +62,7 @@ public static void main(String[] args) throws Exception { DataSet> counts = // normalize and split each line text.map(line -> line.toLowerCase().split("\\W+")) - // convert splitted line in pairs (2-tuples) containing: (word,1) + // convert split line in pairs (2-tuples) containing: (word,1) .flatMap((String[] tokens, Collector> out) -> { // emit the pairs with non-zero-length words Arrays.stream(tokens) diff --git a/flink-java8/src/main/java/org/apache/flink/streaming/examples/java8/wordcount/WordCount.java b/flink-java8/src/main/java/org/apache/flink/streaming/examples/java8/wordcount/WordCount.java index f991433719cba..b9dba77f245e3 100644 --- a/flink-java8/src/main/java/org/apache/flink/streaming/examples/java8/wordcount/WordCount.java +++ b/flink-java8/src/main/java/org/apache/flink/streaming/examples/java8/wordcount/WordCount.java @@ -62,7 +62,7 @@ public static void main(String[] args) throws Exception { DataStream> counts = // normalize and split each line text.map(line -> line.toLowerCase().split("\\W+")) - // convert splitted line in pairs (2-tuples) containing: (word,1) + // convert split line in pairs (2-tuples) containing: (word,1) .flatMap((String[] tokens, Collector> out) -> { // emit the pairs with non-zero-length words Arrays.stream(tokens) diff --git a/flink-java8/src/test/java/org/apache/flink/test/api/java/operators/lambdas/FilterITCase.java b/flink-java8/src/test/java/org/apache/flink/test/api/java/operators/lambdas/FilterITCase.java index 345b119f75df2..6ad1058545715 100644 --- a/flink-java8/src/test/java/org/apache/flink/test/api/java/operators/lambdas/FilterITCase.java +++ b/flink-java8/src/test/java/org/apache/flink/test/api/java/operators/lambdas/FilterITCase.java @@ -28,7 +28,7 @@ import java.util.List; /** - * IT cases for lambda filter funtions. + * IT cases for lambda filter functions. */ public class FilterITCase extends JavaProgramTestBase { diff --git a/flink-libraries/flink-cep/src/test/java/org/apache/flink/cep/nfa/NFATest.java b/flink-libraries/flink-cep/src/test/java/org/apache/flink/cep/nfa/NFATest.java index 2a12d37210973..7721653914f57 100644 --- a/flink-libraries/flink-cep/src/test/java/org/apache/flink/cep/nfa/NFATest.java +++ b/flink-libraries/flink-cep/src/test/java/org/apache/flink/cep/nfa/NFATest.java @@ -133,7 +133,7 @@ public void testTimeoutWindowPruning() { /** * Tests that elements whose timestamp difference is exactly the window length are not matched. - * The reaon is that the right window side (later elements) is exclusive. + * The reason is that the right window side (later elements) is exclusive. 
*/ @Test public void testWindowBorders() { diff --git a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/generator/random/RandomGenerableFactory.java b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/generator/random/RandomGenerableFactory.java index ead29fc82a83d..c1a8999efe044 100644 --- a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/generator/random/RandomGenerableFactory.java +++ b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/generator/random/RandomGenerableFactory.java @@ -46,7 +46,7 @@ public interface RandomGenerableFactory { /** - * The amount of work ({@code elementCount * cyclerPerElement}) is used to + * The amount of work ({@code elementCount * cyclesPerElement}) is used to * generate a list of blocks of work of near-equal size. * * @param elementCount number of elements, as indexed in the {@code BlockInfo} diff --git a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/classification/SVM.scala b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/classification/SVM.scala index 721dd69308219..6d78ef79497fc 100644 --- a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/classification/SVM.scala +++ b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/classification/SVM.scala @@ -35,7 +35,7 @@ import breeze.linalg.{DenseVector => BreezeDenseVector, Vector => BreezeVector} /** Implements a soft-margin SVM using the communication-efficient distributed dual coordinate * ascent algorithm (CoCoA) with hinge-loss function. * - * It can be used for binary classification problems, with the labels set as +1.0 to indiciate a + * It can be used for binary classification problems, with the labels set as +1.0 to indicate a * positive example and -1.0 to indicate a negative example. * * The algorithm solves the following minimization problem: diff --git a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/outlier/StochasticOutlierSelection.scala b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/outlier/StochasticOutlierSelection.scala index 2c04bb05fa4e2..ee82c03f4847c 100644 --- a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/outlier/StochasticOutlierSelection.scala +++ b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/outlier/StochasticOutlierSelection.scala @@ -154,7 +154,7 @@ object StochasticOutlierSelection extends WithParameters { new TransformDataSetOperation[StochasticOutlierSelection, LabeledVector, (Int, Double)] { - /** Overrides the method of the parent class and applies the sochastic outlier selection + /** Overrides the method of the parent class and applies the stochastic outlier selection * algorithm. * * @param instance Instance of the class @@ -181,7 +181,7 @@ object StochasticOutlierSelection extends WithParameters { } /** [[TransformDataSetOperation]] applies the stochastic outlier selection algorithm on a - * [[Vector]] which will transform the high-dimensionaly input to a single Double output. + * [[Vector]] which will transform the high-dimensional input to a single Double output. 
* * @tparam T Type of the input and output data which has to be a subtype of [[Vector]] * @return [[TransformDataSetOperation]] a single double which represents the oulierness of diff --git a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Estimator.scala b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Estimator.scala index dbe078264131b..ca7cb3309621d 100644 --- a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Estimator.scala +++ b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Estimator.scala @@ -117,7 +117,7 @@ object Estimator{ } /** Fallback [[TransformDataSetOperation]] for [[Transformer]] which do not support the input or - * output type with which they are called. This is usualy the case if pipeline operators are + * output type with which they are called. This is usually the case if pipeline operators are * chained which have incompatible input/output types. In order to detect these failures, the * fallback [[TransformDataSetOperation]] throws a [[RuntimeException]] with the corresponding * input/output types. Consequently, a wrong pipeline will be detected at pre-flight phase of diff --git a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Predictor.scala b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Predictor.scala index 9d11cff9e933c..d0f3064425644 100644 --- a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Predictor.scala +++ b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Predictor.scala @@ -230,7 +230,7 @@ trait PredictOperation[Instance, Model, Testing, Prediction] extends Serializabl /** Calculates the prediction for a single element given the model of the [[Predictor]]. * * @param value The unlabeled example on which we make the prediction - * @param model The model representation of the prediciton algorithm + * @param model The model representation of the prediction algorithm * @return A label for the provided example of type [[Prediction]] */ def predict(value: Testing, model: Model): diff --git a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Transformer.scala b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Transformer.scala index 014ad2b2a0773..4b441275ec70c 100644 --- a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Transformer.scala +++ b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Transformer.scala @@ -48,7 +48,7 @@ trait Transformer[Self <: Transformer[Self]] with Serializable { that: Self => - /** Transform operation which transforms an input [[DataSet]] of type I into an ouptut [[DataSet]] + /** Transform operation which transforms an input [[DataSet]] of type I into an output [[DataSet]] * of type O. The actual transform operation is implemented within the * [[TransformDataSetOperation]]. 
* * @@ -57,7 +57,7 @@ trait Transformer[Self <: Transformer[Self]] * @param transformOperation [[TransformDataSetOperation]] which encapsulates the algorithm's * logic * @tparam Input Input data type - * @tparam Output Ouptut data type + * @tparam Output Output data type * @return */ def transform[Input, Output]( @@ -125,7 +125,7 @@ object Transformer{ * @tparam Instance Type of the [[Transformer]] for which the [[TransformDataSetOperation]] is * defined * @tparam Input Input data type - * @tparam Output Ouptut data type + * @tparam Output Output data type */ trait TransformDataSetOperation[Instance, Input, Output] extends Serializable{ def transformDataSet( @@ -148,10 +148,10 @@ trait TransformOperation[Instance, Model, Input, Output] extends Serializable{ /** Retrieves the model of the [[Transformer]] for which this operation has been defined. * * @param instance - * @param transformParemters + * @param transformParameters * @return */ - def getModel(instance: Instance, transformParemters: ParameterMap): DataSet[Model] + def getModel(instance: Instance, transformParameters: ParameterMap): DataSet[Model] /** Transforms a single element with respect to the model associated with the respective * [[Transformer]] diff --git a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/preprocessing/Splitter.scala b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/preprocessing/Splitter.scala index 46b14624823b0..3451c80923564 100644 --- a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/preprocessing/Splitter.scala +++ b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/preprocessing/Splitter.scala @@ -140,7 +140,7 @@ object Splitter { * @param kFolds The number of TrainTest DataSets to be returns. Each 'testing' will be * 1/k of the dataset, randomly sampled, the training will be the remainder * of the dataset. The DataSet is split into kFolds first, so that no - * observation will occurin in multiple folds. + * observation will occur in multiple folds. * @param seed Random number generator seed. * @return An array of TrainTestDataSets */ diff --git a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/recommendation/ALS.scala b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/recommendation/ALS.scala index 04543813994cc..2e2e35a4b28a6 100644 --- a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/recommendation/ALS.scala +++ b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/recommendation/ALS.scala @@ -40,7 +40,7 @@ import scala.util.Random /** Alternating least squares algorithm to calculate a matrix factorization. * - * Given a matrix `R`, ALS calculates two matricess `U` and `V` such that `R ~~ U^TV`. The + * Given a matrix `R`, ALS calculates two matrices `U` and `V` such that `R ~~ U^TV`. The * unknown row dimension is given by the number of latent factors. Since matrix factorization * is often used in the context of recommendation, we'll call the first matrix the user and the * second matrix the item matrix.
The `i`th column of the user matrix is `u_i` and the `i`th diff --git a/flink-libraries/flink-table/src/main/resources/tableSourceConverter.properties b/flink-libraries/flink-table/src/main/resources/tableSourceConverter.properties index d548f485fe227..86a48a8667b0e 100644 --- a/flink-libraries/flink-table/src/main/resources/tableSourceConverter.properties +++ b/flink-libraries/flink-table/src/main/resources/tableSourceConverter.properties @@ -18,9 +18,9 @@ ################################################################################ # The config file is used to specify the packages of current module where -# to find TableSourceConverter implementation class annotationed with TableType. +# to find TableSourceConverter implementation class annotated with TableType. # If there are multiple packages to scan, put those packages together into a -# string seperated with ',', for example, org.package1,org.package2. +# string separated with ',', for example, org.package1,org.package2. # Please notice: # It's better to have a tableSourceConverter.properties in each connector Module # which offers converters instead of put all information into the diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableEnvironment.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableEnvironment.scala index c3cab130f5ae8..280092f9fe98a 100644 --- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableEnvironment.scala +++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableEnvironment.scala @@ -301,7 +301,7 @@ abstract class TableEnvironment(val config: TableConfig) { throw new ExternalCatalogAlreadyExistException(name) } this.externalCatalogs.put(name, externalCatalog) - // create an external catalog calicte schema, register it on the root schema + // create an external catalog Calcite schema, register it on the root schema ExternalCatalogSchema.registerCatalog(rootSchema, name, externalCatalog) } diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableSchema.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableSchema.scala index f4d928fc86da8..534ef394f27bd 100644 --- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableSchema.scala +++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableSchema.scala @@ -40,9 +40,9 @@ class TableSchema( // check uniqueness of field names if (columnNames.toSet.size != columnTypes.length) { val duplicateFields = columnNames - // count occurences of field names + // count occurrences of field names .groupBy(identity).mapValues(_.length) - // filter for occurences > 1 and map to field name + // filter for occurrences > 1 and map to field name .filter(g => g._2 > 1).keys throw new TableException( diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/calcite/FlinkTypeSystem.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/calcite/FlinkTypeSystem.scala index 3a195ed746f9f..99d8cab4f39aa 100644 --- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/calcite/FlinkTypeSystem.scala +++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/calcite/FlinkTypeSystem.scala @@ -26,11 +26,11 @@ import org.apache.calcite.sql.`type`.SqlTypeName */ class FlinkTypeSystem extends RelDataTypeSystemImpl { - // we cannot use Int.MaxValue because of an overflow in Calcites type inference logic + // we cannot 
use Int.MaxValue because of an overflow in Calcite's type inference logic // half should be enough for all use cases override def getMaxNumericScale: Int = Int.MaxValue / 2 - // we cannot use Int.MaxValue because of an overflow in Calcites type inference logic + // we cannot use Int.MaxValue because of an overflow in Calcite's type inference logic // half should be enough for all use cases override def getMaxNumericPrecision: Int = Int.MaxValue / 2 @@ -40,7 +40,7 @@ class FlinkTypeSystem extends RelDataTypeSystemImpl { case SqlTypeName.VARCHAR => Int.MaxValue - // we currenty support only timestamps with milliseconds precision + // we currently support only timestamps with milliseconds precision case SqlTypeName.TIMESTAMP => 3 diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/CodeGenerator.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/CodeGenerator.scala index 40ea5b219877e..90ea8aefe314f 100644 --- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/CodeGenerator.scala +++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/CodeGenerator.scala @@ -244,7 +244,7 @@ abstract class CodeGenerator( * @param returnType conversion target type. Inputs and output must have the same arity. * @param resultFieldNames result field names necessary for a mapping to POJO fields. * @param rowtimeExpression an expression to extract the value of a rowtime field from - * the input data. Required if the field indicies include a rowtime + * the input data. Required if the field indices include a rowtime * marker. * @return instance of GeneratedExpression */ diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/ScalarFunction.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/ScalarFunction.scala index e41b8766e1acf..4c01c1c7d9a2d 100644 --- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/ScalarFunction.scala +++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/ScalarFunction.scala @@ -61,7 +61,7 @@ abstract class ScalarFunction extends UserDefinedFunction { /** * Returns the result type of the evaluation method with a given signature. * - * This method needs to be overriden in case Flink's type extraction facilities are not + * This method needs to be overridden in case Flink's type extraction facilities are not * sufficient to extract the [[TypeInformation]] based on the return type of the evaluation * method. Flink's type extraction facilities can handle basic types or * simple POJOs but might be wrong for more complex, custom, or composite types. diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/TableFunction.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/TableFunction.scala index ff699543b786e..d80ec4710d25e 100644 --- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/TableFunction.scala +++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/TableFunction.scala @@ -111,7 +111,7 @@ abstract class TableFunction[T] extends UserDefinedFunction { /** * Returns the result type of the evaluation method with a given signature. 
* - * This method needs to be overriden in case Flink's type extraction facilities are not + * This method needs to be overridden in case Flink's type extraction facilities are not * sufficient to extract the [[TypeInformation]] based on the return type of the evaluation * method. Flink's type extraction facilities can handle basic types or * simple POJOs but might be wrong for more complex, custom, or composite types. diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/utils/AggSqlFunction.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/utils/AggSqlFunction.scala index 4b1e921ea42d4..241e511a4c5bb 100644 --- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/utils/AggSqlFunction.scala +++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/utils/AggSqlFunction.scala @@ -58,7 +58,7 @@ class AggSqlFunction( createReturnTypeInference(returnType, typeFactory), createOperandTypeInference(aggregateFunction, typeFactory), createOperandTypeChecker(aggregateFunction), - // Do not need to provide a calcite aggregateFunction here. Flink aggregateion function + // Do not need to provide a calcite aggregateFunction here. Flink aggregation function // will be generated when translating the calcite relnode to flink runtime execution plan null, false, diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/utils/UserDefinedFunctionUtils.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/utils/UserDefinedFunctionUtils.scala index 4a34732801525..c2eabae367ba1 100644 --- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/utils/UserDefinedFunctionUtils.scala +++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/utils/UserDefinedFunctionUtils.scala @@ -632,7 +632,7 @@ object UserDefinedFunctionUtils { /** * Creates a [[LogicalTableFunctionCall]] by parsing a String expression. * - * @param tableEnv The table environmenent to lookup the function. + * @param tableEnv The table environment to lookup the function. * @param udtf a String expression of a TableFunctionCall, such as "split(c)" * @return A LogicalTableFunctionCall. 
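Relating to the ScalarFunction and TableFunction javadoc corrected above: when the declared return type of an eval method is too generic for Flink's type extraction, getResultType can be overridden to state the result type explicitly. A hypothetical sketch, with the class ParseAsLong and its eval logic assumed purely for illustration:

import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.table.functions.ScalarFunction;

public class ParseAsLong extends ScalarFunction {

    // Declared as Object, so type extraction alone cannot tell the planner
    // that the result is actually a BIGINT.
    public Object eval(String s) {
        return Long.parseLong(s.trim());
    }

    @Override
    public TypeInformation<?> getResultType(Class<?>[] signature) {
        return BasicTypeInfo.LONG_TYPE_INFO;
    }

    public static void main(String[] args) {
        System.out.println(new ParseAsLong().eval(" 42 ")); // 42
    }
}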
*/ diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/CommonCorrelate.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/CommonCorrelate.scala index c53f090f2ed75..43314577ab852 100644 --- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/CommonCorrelate.scala +++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/CommonCorrelate.scala @@ -143,7 +143,7 @@ trait CommonCorrelate { |""".stripMargin } else { - // adjust indicies of InputRefs to adhere to schema expected by generator + // adjust indices of InputRefs to adhere to schema expected by generator val changeInputRefIndexShuttle = new RexShuttle { override def visitInputRef(inputRef: RexInputRef): RexNode = { new RexInputRef(inputSchema.arity + inputRef.getIndex, inputRef.getType) diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/dataset/DataSetJoin.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/dataset/DataSetJoin.scala index f039cf902784a..56a53a3280092 100644 --- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/dataset/DataSetJoin.scala +++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/dataset/DataSetJoin.scala @@ -441,11 +441,11 @@ class DataSetJoin( s"join: (${joinSelectionToString(joinRowType)})" } - /** Returns an array of indicies with some indicies being a prefix. */ + /** Returns an array of indices with some indices being a prefix. */ private def getFullIndiciesWithPrefix(keys: Array[Int], numFields: Int): Array[Int] = { - // get indicies of all fields which are not keys + // get indices of all fields which are not keys val nonKeys = (0 until numFields).filter(!keys.contains(_)) - // return all field indicies prefixed by keys + // return all field indices prefixed by keys keys ++ nonKeys } diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/datastream/retractionTraits.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/datastream/retractionTraits.scala index 173b7d32112e1..b1c9222f2b33d 100644 --- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/datastream/retractionTraits.scala +++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/datastream/retractionTraits.scala @@ -98,13 +98,14 @@ object AccMode extends Enumeration { * Changes are encoded as follows: * - insert: (true, NewRow) * - update: (true, NewRow) // the Row includes the full unique key to identify the row to update - * - delete: (false, OldRow) // the Row includes the full unique key to idenify the row to delete + * - delete: (false, OldRow) // the Row includes the full unique key to identify the row to + * delete * */ val Acc = Value /** - * * An operator in [[AccRetract]] mode emits change messages as + * An operator in [[AccRetract]] mode emits change messages as * [[org.apache.flink.table.runtime.types.CRow]] which encode a pair of (Boolean, Row). 
* * Changes are encoded as follows: diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/schema/InlineTable.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/schema/InlineTable.scala index 22d6151f152fe..f9122e1452071 100644 --- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/schema/InlineTable.scala +++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/schema/InlineTable.scala @@ -46,9 +46,9 @@ abstract class InlineTable[T]( // check uniqueness of field names if (fieldNames.length != fieldNames.toSet.size) { val duplicateFields = fieldNames - // count occurences of field names + // count occurrences of field names .groupBy(identity).mapValues(_.length) - // filter for occurences > 1 and map to field name + // filter for occurrences > 1 and map to field name .filter(g => g._2 > 1).keys throw new TableException( diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/IncrementalAggregateTimeWindowFunction.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/IncrementalAggregateTimeWindowFunction.scala index a908f497ce2da..31566159f3bbd 100644 --- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/IncrementalAggregateTimeWindowFunction.scala +++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/IncrementalAggregateTimeWindowFunction.scala @@ -27,7 +27,7 @@ import org.apache.flink.table.runtime.types.CRow import org.apache.flink.util.Collector /** - * Computes the final aggregate value from incrementally computed aggreagtes. + * Computes the final aggregate value from incrementally computed aggregates. * * @param numGroupingKey the number of grouping keys * @param numAggregates the number of aggregates diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/IncrementalAggregateWindowFunction.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/IncrementalAggregateWindowFunction.scala index c9fa0c9292360..4ec6407add200 100644 --- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/IncrementalAggregateWindowFunction.scala +++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/IncrementalAggregateWindowFunction.scala @@ -27,7 +27,7 @@ import org.apache.flink.types.Row import org.apache.flink.util.Collector /** - * Computes the final aggregate value from incrementally computed aggreagtes. + * Computes the final aggregate value from incrementally computed aggregates. * * @param numGroupingKey The number of grouping keys. * @param numAggregates The number of aggregates. diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/RowTimeSortProcessFunction.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/RowTimeSortProcessFunction.scala index 0d69355108986..f40feb1f8439c 100644 --- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/RowTimeSortProcessFunction.scala +++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/RowTimeSortProcessFunction.scala @@ -30,7 +30,7 @@ import org.apache.flink.types.Row import org.apache.flink.util.{Collector, Preconditions} /** - * ProcessFunction to sort on event-time and possibly addtional secondary sort attributes. 
+ * ProcessFunction to sort on event-time and possibly additional secondary sort attributes. * * @param inputRowType The data type of the input data. * @param rowtimeIdx The index of the rowtime field. diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/RowTimeUnboundedOver.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/RowTimeUnboundedOver.scala index 27d307b540b32..181c7680a35a4 100644 --- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/RowTimeUnboundedOver.scala +++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/RowTimeUnboundedOver.scala @@ -192,7 +192,7 @@ abstract class RowTimeUnboundedOver( val curTimestamp = sortedTimestamps.removeFirst() val curRowList = rowMapState.get(curTimestamp) - // process the same timestamp datas, the mechanism is different according ROWS or RANGE + // process the same timestamp data, the mechanism is different according ROWS or RANGE processElementsWithSameTimestamp(curRowList, lastAccumulator, out) rowMapState.remove(curTimestamp) @@ -234,7 +234,7 @@ abstract class RowTimeUnboundedOver( } /** - * Process the same timestamp datas, the mechanism is different between + * Process the same timestamp data, the mechanism is different between * rows and range window. */ def processElementsWithSameTimestamp( diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/join/WindowJoinUtil.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/join/WindowJoinUtil.scala index 7006476abd0ef..18e26df89ccab 100644 --- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/join/WindowJoinUtil.scala +++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/join/WindowJoinUtil.scala @@ -420,7 +420,7 @@ object WindowJoinUtil { * Generates a JoinFunction that applies additional join predicates and projects the result. 
* * @param config table env config - * @param joinType join type to determain whether input can be null + * @param joinType join type to determine whether input can be null * @param leftType left stream type * @param rightType right stream type * @param returnType return type diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/CsvTableSource.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/CsvTableSource.scala index c443a690febad..44d3b96fd32b0 100644 --- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/CsvTableSource.scala +++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/CsvTableSource.scala @@ -153,7 +153,7 @@ class CsvTableSource private ( override def projectFields(fields: Array[Int]): CsvTableSource = { val selectedFields = if (fields.isEmpty) Array(0) else fields -// val selectedFiels = fields +// val selectedFields = fields new CsvTableSource( path, diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/TableSourceUtil.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/TableSourceUtil.scala index 48ab3ded717d8..a587a2472eb19 100644 --- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/TableSourceUtil.scala +++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/TableSourceUtil.scala @@ -243,7 +243,7 @@ object TableSourceUtil { * Returns the Calcite schema of a [[TableSource]]. * * @param tableSource The [[TableSource]] for which the Calcite schema is generated. - * @param selectedFields The indicies of all selected fields. None, if all fields are selected. + * @param selectedFields The indices of all selected fields. None, if all fields are selected. * @param streaming Flag to determine whether the schema of a stream or batch table is created. * @param typeFactory The type factory to create the schema. * @return The Calcite schema for the selected fields of the given [[TableSource]]. 
diff --git a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/expressions/RowTypeTest.scala b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/expressions/RowTypeTest.scala index abe3ae2ce84f7..df84a844862d5 100644 --- a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/expressions/RowTypeTest.scala +++ b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/expressions/RowTypeTest.scala @@ -49,7 +49,7 @@ class RowTypeTest extends RowTypeTestBase { "Map('foo', 'bar'), row(1, true))", "ROW(DATE '1985-04-11', CAST(0.1 AS DECIMAL), ARRAY[1, 2, 3], " + "MAP['foo', 'bar'], row(1, true))", - "1985-04-11,0.1,[1, 2, 3],{foo=bar},1,true") // string faltten + "1985-04-11,0.1,[1, 2, 3],{foo=bar},1,true") // string flatten testAllApis( row(1 + 1, 2 * 3, Null(Types.STRING)), diff --git a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/expressions/ScalarOperatorsTest.scala b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/expressions/ScalarOperatorsTest.scala index 6dd2afcca9036..d01e213ba94d8 100644 --- a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/expressions/ScalarOperatorsTest.scala +++ b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/expressions/ScalarOperatorsTest.scala @@ -76,7 +76,7 @@ class ScalarOperatorsTest extends ScalarOperatorsTestBase { @Test def testArithmetic(): Unit = { - // math arthmetic + // math arithmetic testTableApi('f8 - 5, "f8 - 5", "0") testTableApi('f8 + 5, "f8 + 5", "10") testTableApi('f8 / 2, "f8 / 2", "2") diff --git a/flink-mesos/src/main/java/org/apache/flink/mesos/entrypoint/MesosEntrypointUtils.java b/flink-mesos/src/main/java/org/apache/flink/mesos/entrypoint/MesosEntrypointUtils.java index 368d62d8621f0..af72c965d6451 100755 --- a/flink-mesos/src/main/java/org/apache/flink/mesos/entrypoint/MesosEntrypointUtils.java +++ b/flink-mesos/src/main/java/org/apache/flink/mesos/entrypoint/MesosEntrypointUtils.java @@ -42,7 +42,7 @@ import scala.concurrent.duration.FiniteDuration; /** - * Utils for Mesos entrpoints. + * Utils for Mesos entry points. */ public class MesosEntrypointUtils { diff --git a/flink-mesos/src/main/java/org/apache/flink/mesos/runtime/clusterframework/services/AbstractMesosServices.java b/flink-mesos/src/main/java/org/apache/flink/mesos/runtime/clusterframework/services/AbstractMesosServices.java index e4f4cf7ae4153..63f371d9bd05d 100644 --- a/flink-mesos/src/main/java/org/apache/flink/mesos/runtime/clusterframework/services/AbstractMesosServices.java +++ b/flink-mesos/src/main/java/org/apache/flink/mesos/runtime/clusterframework/services/AbstractMesosServices.java @@ -27,7 +27,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull; /** - * An abrstact implementation of {@link MesosServices}. + * An abstract implementation of {@link MesosServices}. */ public abstract class AbstractMesosServices implements MesosServices { diff --git a/flink-metrics/flink-metrics-jmx/src/main/java/org/apache/flink/metrics/jmx/JMXReporter.java b/flink-metrics/flink-metrics-jmx/src/main/java/org/apache/flink/metrics/jmx/JMXReporter.java index 1cc7d38685eeb..461f1dc31cbf0 100644 --- a/flink-metrics/flink-metrics-jmx/src/main/java/org/apache/flink/metrics/jmx/JMXReporter.java +++ b/flink-metrics/flink-metrics-jmx/src/main/java/org/apache/flink/metrics/jmx/JMXReporter.java @@ -85,7 +85,7 @@ public String filterCharacters(String input) { /** The names under which the registered metrics have been added to the MBeanServer. 
*/ private final Map registeredMetrics; - /** The server to which JMX clients connect to. ALlows for better control over port usage. */ + /** The server to which JMX clients connect to. Allows for better control over port usage. */ private JMXServer jmxServer; public JMXReporter() { diff --git a/flink-optimizer/src/main/java/org/apache/flink/optimizer/traversals/PlanFinalizer.java b/flink-optimizer/src/main/java/org/apache/flink/optimizer/traversals/PlanFinalizer.java index b3b7cf928b494..0136e20f07553 100644 --- a/flink-optimizer/src/main/java/org/apache/flink/optimizer/traversals/PlanFinalizer.java +++ b/flink-optimizer/src/main/java/org/apache/flink/optimizer/traversals/PlanFinalizer.java @@ -209,7 +209,7 @@ else if (visitable instanceof SolutionSetPlanNode) { } } - // pass the visitor to the iteraton's step function + // pass the visitor to the iteration's step function if (visitable instanceof IterationPlanNode) { // push the iteration node onto the stack final IterationPlanNode iterNode = (IterationPlanNode) visitable; diff --git a/flink-optimizer/src/test/java/org/apache/flink/optimizer/UnionPropertyPropagationTest.java b/flink-optimizer/src/test/java/org/apache/flink/optimizer/UnionPropertyPropagationTest.java index fefc627b7b749..7248c10900c78 100644 --- a/flink-optimizer/src/test/java/org/apache/flink/optimizer/UnionPropertyPropagationTest.java +++ b/flink-optimizer/src/test/java/org/apache/flink/optimizer/UnionPropertyPropagationTest.java @@ -136,7 +136,7 @@ public boolean preVisit(PlanNode visitable) { } /* Test on the union input connections - * Must be NUM_INPUTS input connections, all FlatMapOperators with a own partitioning strategy(propably hash) + * Must be NUM_INPUTS input connections, all FlatMapOperators with a own partitioning strategy (probably hash) */ if (visitable instanceof NAryUnionPlanNode) { int numberInputs = 0; diff --git a/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/RedirectHandlerTest.java b/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/RedirectHandlerTest.java index 3a976e417e8d6..98dc20a819933 100644 --- a/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/RedirectHandlerTest.java +++ b/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/RedirectHandlerTest.java @@ -59,7 +59,7 @@ public class RedirectHandlerTest extends TestLogger { * Tests the behaviour of the RedirectHandler under the following conditions. * *
1. No local address known --> service unavailable - * 2. Local address knwon but no gateway resolved --> service unavailable + * 2. Local address known but no gateway resolved --> service unavailable * 3. Remote leader gateway --> redirection * 4. Local leader gateway * @throws Exception diff --git a/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/history/HistoryServerStaticFileServerHandlerTest.java b/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/history/HistoryServerStaticFileServerHandlerTest.java index 066de7449de47..23f0f53542230 100644 --- a/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/history/HistoryServerStaticFileServerHandlerTest.java +++ b/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/history/HistoryServerStaticFileServerHandlerTest.java @@ -55,7 +55,7 @@ public void testRespondWithFile() throws Exception { int port = webUI.getServerPort(); try { - // verify that 404 message is returned when requesting a non-existant file + // verify that 404 message is returned when requesting a non-existent file String notFound404 = HistoryServerTest.getFromHTTP("http://localhost:" + port + "/hello"); Assert.assertTrue(notFound404.contains("404 Not Found")); diff --git a/flink-runtime-web/web-dashboard/vendor-local/d3-timeline.js b/flink-runtime-web/web-dashboard/vendor-local/d3-timeline.js index aadca555f6469..be02f325f5fa2 100644 --- a/flink-runtime-web/web-dashboard/vendor-local/d3-timeline.js +++ b/flink-runtime-web/web-dashboard/vendor-local/d3-timeline.js @@ -69,7 +69,7 @@ .attr("clip-path", "url(#" + prefix + "-gclip" + ")") // check if the user wants relative time - // if so, substract the first timestamp from each subsequent timestamps + // if so, subtract the first timestamp from each subsequent timestamps if(timeIsRelative){ g.each(function (d, i) { d.forEach(function (datum, index) { diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/akka/FlinkUntypedActor.java b/flink-runtime/src/main/java/org/apache/flink/runtime/akka/FlinkUntypedActor.java index 8078c267160f3..e0279b3dadcdd 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/akka/FlinkUntypedActor.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/akka/FlinkUntypedActor.java @@ -49,7 +49,7 @@ public abstract class FlinkUntypedActor extends UntypedActor { * processing time of the incoming message if the logging level is set to debug. After logging * the handleLeaderSessionID method is called. * - *
Important: This method cannot be overriden. The actor specific message handling logic is + *
Important: This method cannot be overridden. The actor specific message handling logic is * implemented by the method handleMessage. * * @param message Incoming message @@ -124,7 +124,7 @@ private void handleNoLeaderId(LeaderSessionMessage msg) { protected abstract void handleMessage(Object message) throws Exception; /** - * Returns the current leader session ID associcated with this actor. + * Returns the current leader session ID associated with this actor. * @return */ protected abstract UUID getLeaderSessionID(); @@ -134,10 +134,10 @@ private void handleNoLeaderId(LeaderSessionMessage msg) { * a leader session ID (indicated by {@link RequiresLeaderSessionID}) in a * {@link LeaderSessionMessage} with the actor's leader session ID. * - *
This method can be overriden to implement a different decoration behavior. + *
This method can be overridden to implement a different decoration behavior. * * @param message Message to be decorated - * @return The deocrated message + * @return The decorated message */ protected Object decorateMessage(Object message) { if (message instanceof RequiresLeaderSessionID) { diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/blob/PermanentBlobCache.java b/flink-runtime/src/main/java/org/apache/flink/runtime/blob/PermanentBlobCache.java index ae59f590b8db5..83cb18e6a7ccb 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/blob/PermanentBlobCache.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/blob/PermanentBlobCache.java @@ -261,7 +261,7 @@ public void run() { /* * NOTE: normally it is not required to acquire the write lock to delete the job's - * storage directory since there should be noone accessing it with the ref + * storage directory since there should be no one accessing it with the ref * counter being 0 - acquire it just in case, to always be on the safe side */ readWriteLock.writeLock().lock(); diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java index 9a4456ef7d7d0..824563f950ed7 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java @@ -946,7 +946,7 @@ public void run() { * Fails all pending checkpoints which have not been acknowledged by the given execution * attempt id. * - * @param executionAttemptId for which to discard unaknowledged pending checkpoints + * @param executionAttemptId for which to discard unacknowledged pending checkpoints * @param cause of the failure */ public void failUnacknowledgedPendingCheckpointsFor(ExecutionAttemptID executionAttemptId, Throwable cause) { diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PendingCheckpointStats.java b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PendingCheckpointStats.java index 0f322508d2772..59b404a721320 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PendingCheckpointStats.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PendingCheckpointStats.java @@ -56,7 +56,7 @@ public class PendingCheckpointStats extends AbstractCheckpointStats { /** Current buffered bytes during alignment over all collected subtasks. */ private volatile long currentAlignmentBuffered; - /** Stats of the latest acknowleged subtask. */ + /** Stats of the latest acknowledged subtask. */ private volatile SubtaskStateStats latestAcknowledgedSubtask; /** diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/client/JobListeningContext.java b/flink-runtime/src/main/java/org/apache/flink/runtime/client/JobListeningContext.java index eb045c0058956..5ce0de81cae05 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/client/JobListeningContext.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/client/JobListeningContext.java @@ -114,7 +114,7 @@ public Future getJobResultFuture() { } /** - * @return The Job Client actor which communicats with the JobManager. + * @return The Job Client actor which communicates with the JobManager. 
*/ public ActorRef getJobClientActor() { return jobClientActor; diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java b/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java index d1efd77aa5564..ecfbc60e4613d 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java @@ -353,7 +353,7 @@ else if(pair.length == 2) { /** * Generates the shell command to start a task manager. * @param flinkConfig The Flink configuration. - * @param tmParams Paramaters for the task manager. + * @param tmParams Parameters for the task manager. * @param configDirectory The configuration directory for the flink-conf.yaml * @param logDirectory The log directory. * @param hasLogback Uses logback? diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/FlinkResourceManager.java b/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/FlinkResourceManager.java index f9c39c1c92a2e..e20dd9b375646 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/FlinkResourceManager.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/FlinkResourceManager.java @@ -738,7 +738,7 @@ protected void sendInfoMessage(String message) { * Starts the resource manager actors. * @param configuration The configuration for the resource manager * @param actorSystem The actor system to start the resource manager in - * @param leaderRetriever The leader retriever service to intialize the resource manager + * @param leaderRetriever The leader retriever service to initialize the resource manager * @param resourceManagerClass The class of the ResourceManager to be started * @return ActorRef of the resource manager */ @@ -757,7 +757,7 @@ public static ActorRef startResourceManagerActors( * Starts the resource manager actors. * @param configuration The configuration for the resource manager * @param actorSystem The actor system to start the resource manager in - * @param leaderRetriever The leader retriever service to intialize the resource manager + * @param leaderRetriever The leader retriever service to initialize the resource manager * @param resourceManagerClass The class of the ResourceManager to be started * @param resourceManagerActorName The name of the resource manager actor. 
* @return ActorRef of the resource manager diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/overlays/HadoopConfOverlay.java b/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/overlays/HadoopConfOverlay.java index bd79218c64daf..c45cd02eed525 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/overlays/HadoopConfOverlay.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/overlays/HadoopConfOverlay.java @@ -40,7 +40,7 @@ * The following environment variables are set in the container: * - HADOOP_CONF_DIR * - * The folloowing Flink configuration entries are updated: + * The following Flink configuration entries are updated: * - fs.hdfs.hadoopconf */ public class HadoopConfOverlay implements ContainerOverlay { diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/overlays/KeytabOverlay.java b/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/overlays/KeytabOverlay.java index 271b32df11e6a..c8c87d44e87f0 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/overlays/KeytabOverlay.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/overlays/KeytabOverlay.java @@ -33,7 +33,7 @@ /** * Overlays cluster-level Kerberos credentials (i.e. keytab) into a container. * - * The folloowing Flink configuration entries are updated: + * The following Flink configuration entries are updated: * - security.kerberos.login.keytab */ public class KeytabOverlay extends AbstractContainerOverlay { diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java b/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java index 3dec3f304f335..6eb9af4fff007 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java @@ -76,7 +76,7 @@ public class ResourceProfile implements Serializable, Comparable getVerticesTopologically(); diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java index cc35060e7aeb1..367d02c0d0d5f 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java @@ -782,7 +782,7 @@ public void fail(Throwable t) { * @param sampleId of the stack trace sample * @param numSamples the sample should contain * @param delayBetweenSamples to wait - * @param maxStrackTraceDepth of the samples + * @param maxStackTraceDepth of the samples * @param timeout until the request times out * @return Future stack trace sample response */ @@ -790,7 +790,7 @@ public CompletableFuture requestStackTraceSample( int sampleId, int numSamples, Time delayBetweenSamples, - int maxStrackTraceDepth, + int maxStackTraceDepth, Time timeout) { final LogicalSlot slot = assignedResource; @@ -803,7 +803,7 @@ public CompletableFuture requestStackTraceSample( sampleId, numSamples, delayBetweenSamples, - maxStrackTraceDepth, + maxStackTraceDepth, timeout); } else { return FutureUtils.completedExceptionally(new Exception("The execution has no slot assigned.")); diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionVertex.java b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionVertex.java index cb4f2c8e56430..ef4608623e302 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionVertex.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionVertex.java @@ -447,7 +447,7 @@ else if (numSources < parallelism) { *
  • Repeated executions of stateful tasks try to co-locate the execution with its state. * * - * @return The preferred excution locations for the execution attempt. + * @return The preferred execution locations for the execution attempt. * * @see #getPreferredLocationsBasedOnState() * @see #getPreferredLocationsBasedOnInputs() diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/highavailability/FsNegativeRunningJobsRegistry.java b/flink-runtime/src/main/java/org/apache/flink/runtime/highavailability/FsNegativeRunningJobsRegistry.java index cb79a65b895e6..d29dcec47a84e 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/highavailability/FsNegativeRunningJobsRegistry.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/highavailability/FsNegativeRunningJobsRegistry.java @@ -31,7 +31,7 @@ /** * This {@link RunningJobsRegistry} tracks the status jobs via marker files, - * marking running jobs viarunning marker files, marking finished jobs via finished marker files. + * marking running jobs via running marker files, marking finished jobs via finished marker files. * *
    The general contract is the following: *
      diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpillableSubpartition.java b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpillableSubpartition.java index 654d5289bba1b..77ffd4fe8268d 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpillableSubpartition.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpillableSubpartition.java @@ -204,7 +204,7 @@ public int releaseMemory() throws IOException { ResultSubpartitionView view = readView; if (view != null && view.getClass() == SpillableSubpartitionView.class) { - // If there is a spilalble view, it's the responsibility of the + // If there is a spillable view, it's the responsibility of the // view to release memory. SpillableSubpartitionView spillableView = (SpillableSubpartitionView) view; return spillableView.releaseMemory(); diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/iterative/task/AbstractIterativeTask.java b/flink-runtime/src/main/java/org/apache/flink/runtime/iterative/task/AbstractIterativeTask.java index bde358cff9ff8..a36fc57a91321 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/iterative/task/AbstractIterativeTask.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/iterative/task/AbstractIterativeTask.java @@ -97,7 +97,7 @@ protected void initialize() throws Exception { // check if the driver is resettable if (this.driver instanceof ResettableDriver) { final ResettableDriver resDriver = (ResettableDriver) this.driver; - // make sure that the according inputs are not reseted + // make sure that the according inputs are not reset for (int i = 0; i < resDriver.getNumberOfInputs(); i++) { if (resDriver.isInputResettable(i)) { excludeFromReset(i); diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/JobVertex.java b/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/JobVertex.java index 4f52895dde792..1fe95ebf01054 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/JobVertex.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/JobVertex.java @@ -68,7 +68,7 @@ public class JobVertex implements java.io.Serializable { /** Number of subtasks to split this task into at runtime.*/ private int parallelism = ExecutionConfig.PARALLELISM_DEFAULT; - /** Maximum number of subtasks to split this taks into a runtime. */ + /** Maximum number of subtasks to split this task into a runtime. */ private int maxParallelism = -1; /** The minimum resource of the vertex */ diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/SlotContext.java b/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/SlotContext.java index 65bf2a1a8efc0..3878167684283 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/SlotContext.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/SlotContext.java @@ -32,7 +32,7 @@ public interface SlotContext { * Gets the id under which the slot has been allocated on the TaskManager. This id uniquely identifies the * physical slot. 
* - * @return The id under whic teh slot has been allocated on the TaskManager + * @return The id under which the slot has been allocated on the TaskManager */ AllocationID getAllocationId(); diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPoolGateway.java b/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPoolGateway.java index d3b51f7b119bf..7a627b4adc7da 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPoolGateway.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPoolGateway.java @@ -108,7 +108,7 @@ CompletableFuture offerSlot( * individually accepted or rejected by returning the collection of accepted * slot offers. * - * @param taskManagerLocation from which the slot offeres originate + * @param taskManagerLocation from which the slot offers originate * @param taskManagerGateway to talk to the slot offerer * @param offers slot offers which are offered to the {@link SlotPool} * @return A collection of accepted slot offers (future). The remaining slot offers are diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java index 87b0a76d0e8e4..be81877e734ce 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java @@ -131,7 +131,7 @@ public class BatchTask extends AbstractInvokable impleme protected int[] iterativeInputs; /** - * The indices of the iterative broadcast inputs. Empty, if non of the inputs is iteratve. + * The indices of the iterative broadcast inputs. Empty, if non of the inputs is iterative. */ protected int[] iterativeBroadcastInputs; @@ -184,13 +184,13 @@ public class BatchTask extends AbstractInvokable impleme /** * Certain inputs may be excluded from resetting. For example, the initial partial solution - * in an iteration head must not be reseted (it is read through the back channel), when all - * others are reseted. + * in an iteration head must not be reset (it is read through the back channel), when all + * others are reset. */ private boolean[] excludeFromReset; /** - * Flag indicating for each input whether it is cached and can be reseted. + * Flag indicating for each input whether it is cached and can be reset. */ private boolean[] inputIsCached; diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/CoGroupDriver.java b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/CoGroupDriver.java index c3bd492d8a660..7ed86bfc80a05 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/CoGroupDriver.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/CoGroupDriver.java @@ -122,14 +122,14 @@ public void prepare() throws Exception } if (objectReuseEnabled) { - // create CoGropuTaskIterator according to provided local strategy. + // create CoGroupTaskIterator according to provided local strategy. this.coGroupIterator = new ReusingSortMergeCoGroupIterator( in1, in2, serializer1, groupComparator1, serializer2, groupComparator2, pairComparatorFactory.createComparator12(groupComparator1, groupComparator2)); } else { - // create CoGropuTaskIterator according to provided local strategy. + // create CoGroupTaskIterator according to provided local strategy. 
this.coGroupIterator = new NonReusingSortMergeCoGroupIterator( in1, in2, serializer1, groupComparator1, diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java index f46fcfba1769b..bfc9aec426119 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java @@ -628,7 +628,7 @@ public void overwritePointerAt(long pointer, long value) throws IOException { } /** - * Overwrites a record at the sepcified position. The record is read from a DataInputView (this will be the staging area). + * Overwrites a record at the specified position. The record is read from a DataInputView (this will be the staging area). * WARNING: The record must not be larger than the original record. * @param pointer Points to the position to overwrite. * @param input The DataInputView to read the record from diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/ClusterConfigurationInfo.java b/flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/ClusterConfigurationInfo.java index dba6a4cce4886..627dc4c41029b 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/ClusterConfigurationInfo.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/ClusterConfigurationInfo.java @@ -24,7 +24,7 @@ import java.util.ArrayList; /** - * Response of the {@link ClusterConfigHandler}, respresented as a list + * Response of the {@link ClusterConfigHandler}, represented as a list * of key-value pairs of the cluster {@link Configuration}. */ public class ClusterConfigurationInfo extends ArrayList implements ResponseBody { diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/state/AbstractKeyedStateBackend.java b/flink-runtime/src/main/java/org/apache/flink/runtime/state/AbstractKeyedStateBackend.java index fea537b6ebd7e..5edccd6c6753d 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/state/AbstractKeyedStateBackend.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/state/AbstractKeyedStateBackend.java @@ -99,7 +99,7 @@ public abstract class AbstractKeyedStateBackend private final ExecutionConfig executionConfig; /** - * Decoratores the input and output streams to write key-groups compressed. + * Decorates the input and output streams to write key-groups compressed. 
*/ protected final StreamCompressionDecorator keyGroupCompressionDecorator; diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/state/DefaultOperatorStateBackend.java b/flink-runtime/src/main/java/org/apache/flink/runtime/state/DefaultOperatorStateBackend.java index 9edf8fcf83d40..aa17efb79f763 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/state/DefaultOperatorStateBackend.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/state/DefaultOperatorStateBackend.java @@ -426,7 +426,7 @@ static final class PartitionableListState implements ListState { private final ArrayList internalList; /** - * A serializer that allows to perfom deep copies of internalList + * A serializer that allows to perform deep copies of internalList */ private final ArrayListSerializer internalListCopySerializer; diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/state/SharedStateRegistry.java b/flink-runtime/src/main/java/org/apache/flink/runtime/state/SharedStateRegistry.java index 24e3d92b50457..458c695590124 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/state/SharedStateRegistry.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/state/SharedStateRegistry.java @@ -79,7 +79,7 @@ public SharedStateRegistry(Executor asyncDisposalExecutor) { * * @param state the shared state for which we register a reference. * @return the result of this registration request, consisting of the state handle that is - * registered under the key by the end of the oepration and its current reference count. + * registered under the key by the end of the operation and its current reference count. */ public Result registerReference(SharedStateRegistryKey registrationKey, StreamStateHandle state) { diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlotTable.java b/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlotTable.java index 799f639cb5615..1384336f2e1d8 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlotTable.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlotTable.java @@ -179,7 +179,7 @@ public boolean allocateSlot(int index, JobID jobId, AllocationID allocationId, T boolean result = taskSlot.allocate(jobId, allocationId); if (result) { - // update the alloction id to task slot map + // update the allocation id to task slot map allocationIDTaskSlotMap.put(allocationId, taskSlot); // register a timeout for this slot since it's in state allocated diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java b/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java index a049063a54966..3c1e98e4e1b9f 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java @@ -1550,7 +1550,7 @@ public void run() { // It is possible that the user code does not react to the task canceller. // for that reason, we spawn this separate thread that repeatedly interrupts - // the user code until it exits. If the suer user code does not exit within + // the user code until it exits. If the user code does not exit within // the timeout, we notify the job manager about a fatal error. 
while (executer.isAlive()) { long now = System.nanoTime(); diff --git a/flink-runtime/src/main/scala/org/apache/flink/runtime/akka/AkkaUtils.scala b/flink-runtime/src/main/scala/org/apache/flink/runtime/akka/AkkaUtils.scala index 3df1c0a1d35ba..cab13787c994b 100644 --- a/flink-runtime/src/main/scala/org/apache/flink/runtime/akka/AkkaUtils.scala +++ b/flink-runtime/src/main/scala/org/apache/flink/runtime/akka/AkkaUtils.scala @@ -588,7 +588,7 @@ object AkkaUtils { * @param tries maximum number of tries before the future fails * @param executionContext which shall execute the future * @param timeout of the future - * @return future which tries to receover by re-executing itself a given number of times + * @return future which tries to recover by re-executing itself a given number of times */ def retry(target: ActorRef, message: Any, tries: Int)(implicit executionContext: ExecutionContext, timeout: FiniteDuration): Future[Any] = { diff --git a/flink-runtime/src/main/scala/org/apache/flink/runtime/messages/ArchiveMessages.scala b/flink-runtime/src/main/scala/org/apache/flink/runtime/messages/ArchiveMessages.scala index 435b736ce1c8c..c1227dcd1e755 100644 --- a/flink-runtime/src/main/scala/org/apache/flink/runtime/messages/ArchiveMessages.scala +++ b/flink-runtime/src/main/scala/org/apache/flink/runtime/messages/ArchiveMessages.scala @@ -39,7 +39,7 @@ object ArchiveMessages { case object RequestJobCounts /** - * Reqeuest a specific ExecutionGraph by JobID. The response is [[RequestArchivedJob]] + * Request a specific ExecutionGraph by JobID. The response is [[RequestArchivedJob]] * @param jobID */ case class RequestArchivedJob(jobID: JobID) diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorTest.java index ab00f2b6562e9..2572bc14868d3 100644 --- a/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorTest.java +++ b/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorTest.java @@ -1493,7 +1493,7 @@ public void testTriggerAndConfirmSimpleSavepoint() throws Exception { assertTrue(pending.isDiscarded()); assertTrue(savepointFuture.isDone()); - // the now the saveppoint should be completed but not added to the completed checkpoint store + // the now the savepoint should be completed but not added to the completed checkpoint store assertEquals(0, coord.getNumberOfRetainedSuccessfulCheckpoints()); assertEquals(0, coord.getNumberOfPendingCheckpoints()); diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/PendingCheckpointTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/PendingCheckpointTest.java index ef31f0a3aa63c..bf794570eb054 100644 --- a/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/PendingCheckpointTest.java +++ b/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/PendingCheckpointTest.java @@ -249,7 +249,7 @@ public void testAbortDiscardsState() throws Exception { @Test public void testPendingCheckpointStatsCallbacks() throws Exception { { - // Complete sucessfully + // Complete successfully PendingCheckpointStats callback = mock(PendingCheckpointStats.class); PendingCheckpoint pending = createPendingCheckpoint(CheckpointProperties.forStandardCheckpoint(), null); pending.setStatsCallback(callback); diff --git 
a/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/ZooKeeperCompletedCheckpointStoreITCase.java b/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/ZooKeeperCompletedCheckpointStoreITCase.java index dc2b11ebd08bc..f493d6f967b40 100644 --- a/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/ZooKeeperCompletedCheckpointStoreITCase.java +++ b/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/ZooKeeperCompletedCheckpointStoreITCase.java @@ -270,7 +270,7 @@ public void testConcurrentCheckpointOperations() throws Exception { TestCompletedCheckpoint completedCheckpoint3 = createCheckpoint(3, sharedStateRegistry); - // this should release the last lock on completedCheckoint and thus discard it + // this should release the last lock on completedCheckpoint and thus discard it zkCheckpointStore2.addCheckpoint(completedCheckpoint3); // the checkpoint should be discarded eventually because there is no lock on it anymore diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/client/JobClientActorRecoveryITCase.java b/flink-runtime/src/test/java/org/apache/flink/runtime/client/JobClientActorRecoveryITCase.java index 4fdaef5b8f27c..301d206e86562 100644 --- a/flink-runtime/src/test/java/org/apache/flink/runtime/client/JobClientActorRecoveryITCase.java +++ b/flink-runtime/src/test/java/org/apache/flink/runtime/client/JobClientActorRecoveryITCase.java @@ -67,7 +67,7 @@ public static void teardown() throws Exception { } /** - * Tests wether the JobClientActor can connect to a newly elected leading job manager to obtain + * Tests whether the JobClientActor can connect to a newly elected leading job manager to obtain * the JobExecutionResult. The submitted job blocks for the first execution attempt. The * leading job manager will be killed so that the second job manager will be elected as the * leader. The newly elected leader has to retrieve the checkpointed job from ZooKeeper diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/GlobalModVersionTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/GlobalModVersionTest.java index bfad3273b1686..fc769aebd912b 100644 --- a/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/GlobalModVersionTest.java +++ b/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/GlobalModVersionTest.java @@ -95,7 +95,7 @@ public void testNoLocalFailoverWhileCancelling() throws Exception { } /** - * Tests that failures during a global faiover are not handed to the local + * Tests that failures during a global failover are not handed to the local * failover strategy. */ @Test diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java index 75627ed05fc6e..bff7484b5155b 100644 --- a/flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java +++ b/flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java @@ -251,7 +251,7 @@ public void testHeartbeatCluster() throws Exception { */ @Test public void testTargetUnmonitoring() throws InterruptedException, ExecutionException { - // this might be too aggresive for Travis, let's see... + // this might be too aggressive for Travis, let's see... 
long heartbeatTimeout = 100L; ResourceID resourceID = new ResourceID("foobar"); ResourceID targetID = new ResourceID("target"); diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/io/disk/ChannelViewsTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/io/disk/ChannelViewsTest.java index ba861311d6a3e..8c7ca1b04e0f0 100644 --- a/flink-runtime/src/test/java/org/apache/flink/runtime/io/disk/ChannelViewsTest.java +++ b/flink-runtime/src/test/java/org/apache/flink/runtime/io/disk/ChannelViewsTest.java @@ -262,7 +262,7 @@ public void testReadWithoutKnownBlockCount() throws Exception final ChannelReaderInputView inView = new ChannelReaderInputView(reader, memory, true); generator.reset(); - // read and re-generate all records and cmpare them + // read and re-generate all records and compare them final Tuple2 readRec = new Tuple2<>(); for (int i = 0; i < NUM_PAIRS_SHORT; i++) { generator.next(rec); diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/NetworkEnvironmentTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/NetworkEnvironmentTest.java index ba92bdf80278b..4964be798a062 100644 --- a/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/NetworkEnvironmentTest.java +++ b/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/NetworkEnvironmentTest.java @@ -112,7 +112,7 @@ public void testRegisterTaskUsesBoundedBuffers() throws Exception { * @param partitionType * the produced partition type * @param channels - * the nummer of output channels + * the number of output channels * * @return instance with minimal data set and some mocks so that it is useful for {@link * NetworkEnvironment#registerTask(Task)} @@ -140,7 +140,7 @@ private static ResultPartition createResultPartition( * @param partitionType * the consumed partition type * @param channels - * the nummer of input channels + * the number of input channels * * @return mock with minimal functionality necessary by {@link NetworkEnvironment#registerTask(Task)} */ diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/TaskEventDispatcherTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/TaskEventDispatcherTest.java index 41201cf2a8932..1076f99438c5a 100644 --- a/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/TaskEventDispatcherTest.java +++ b/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/TaskEventDispatcherTest.java @@ -122,7 +122,7 @@ public void unregisterPartition() throws Exception { ted.unregisterPartition(partitionId2); - // publis something for partitionId1 triggering all according listeners + // publish something for partitionId1 triggering all according listeners assertTrue(ted.publish(partitionId1, event)); assertTrue("listener should have fired for AllWorkersDoneEvent", eventListener1a.fired); assertTrue("listener should have fired for AllWorkersDoneEvent", eventListener2.fired); diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/JobManagerTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/JobManagerTest.java index 51cc469d560d8..5bc207a0ec415 100644 --- a/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/JobManagerTest.java +++ b/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/JobManagerTest.java @@ -906,7 +906,7 @@ public void testCancelWithSavepoint() throws Exception { msg = new TestingJobManagerMessages.WaitForAllVerticesToBeRunning(jobGraph.getJobID()); 
Await.result(jobManager.ask(msg, timeout), timeout); - // Notify when canelled + // Notify when cancelled msg = new NotifyWhenJobStatus(jobGraph.getJobID(), JobStatus.CANCELED); Future cancelled = jobManager.ask(msg, timeout); diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/metrics/groups/TaskMetricGroupTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/metrics/groups/TaskMetricGroupTest.java index be7407e867aab..47ee1a91db113 100644 --- a/flink-runtime/src/test/java/org/apache/flink/runtime/metrics/groups/TaskMetricGroupTest.java +++ b/flink-runtime/src/test/java/org/apache/flink/runtime/metrics/groups/TaskMetricGroupTest.java @@ -147,7 +147,7 @@ public void testTaskMetricGroupCleanup() { taskMetricGroup.close(); - // now alle registered metrics should have been unregistered + // now all registered metrics should have been unregistered assertEquals(0, registry.getNumberRegisteredMetrics()); registry.shutdown(); diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationRegistryTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationRegistryTest.java index 74e16a02aa2b9..f860a300830ea 100644 --- a/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationRegistryTest.java +++ b/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationRegistryTest.java @@ -44,7 +44,7 @@ public class KvStateLocationRegistryTest { /** - * Simple test registering/unregistereing state and looking it up again. + * Simple test registering/unregistering state and looking it up again. */ @Test public void testRegisterAndLookup() throws Exception { diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationTest.java index 3c79948ecd155..b46b4e06ccf9c 100644 --- a/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationTest.java +++ b/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationTest.java @@ -38,7 +38,7 @@ public class KvStateLocationTest { /** - * Simple test registering/unregistereing state and looking it up again. + * Simple test registering/unregistering state and looking it up again. 
*/ @Test public void testRegisterAndLookup() throws Exception { diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/resourcemanager/slotmanager/SlotManagerTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/resourcemanager/slotmanager/SlotManagerTest.java index cf0aef9001fb4..375eb0b5340c8 100644 --- a/flink-runtime/src/test/java/org/apache/flink/runtime/resourcemanager/slotmanager/SlotManagerTest.java +++ b/flink-runtime/src/test/java/org/apache/flink/runtime/resourcemanager/slotmanager/SlotManagerTest.java @@ -999,7 +999,7 @@ public void testTimeoutForUnusedTaskManager() throws Exception { () -> slotManager.isTaskManagerIdle(taskManagerConnection.getInstanceID()), mainThreadExecutor); - // check that the TaskManaer is not idle + // check that the TaskManager is not idle assertFalse(idleFuture.get()); final SlotID slotId = slotIdArgumentCaptor.getValue(); diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/legacy/checkpoints/CheckpointStatsCacheTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/legacy/checkpoints/CheckpointStatsCacheTest.java index 73377725d7587..17d923458ecc0 100644 --- a/flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/legacy/checkpoints/CheckpointStatsCacheTest.java +++ b/flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/legacy/checkpoints/CheckpointStatsCacheTest.java @@ -30,7 +30,7 @@ import static org.mockito.Mockito.when; /** - * Tests for the CheckpoitnStatsCache. + * Tests for the CheckpointStatsCache. */ public class CheckpointStatsCacheTest { diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/state/SharedStateRegistryTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/state/SharedStateRegistryTest.java index 4104595910693..6272473f4d100 100644 --- a/flink-runtime/src/test/java/org/apache/flink/runtime/state/SharedStateRegistryTest.java +++ b/flink-runtime/src/test/java/org/apache/flink/runtime/state/SharedStateRegistryTest.java @@ -82,7 +82,7 @@ public void testRegistryNormal() { } /** - * Validate that unregister an unexisted key will throw exception + * Validate that unregister a nonexistent key will throw exception */ @Test(expected = IllegalStateException.class) public void testUnregisterWithUnexistedKey() { diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java b/flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java index 0ac607f625d3f..7730aec78158c 100644 --- a/flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java +++ b/flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java @@ -463,7 +463,7 @@ public void testBackendUsesRegisteredKryoSerializerUsingGetOrCreate() throws Exc * - snapshot was taken without any Kryo registrations, specific serializers or default serializers for the state type * - restored with the state type registered (no specific serializer) * - * This test should not fail, because de- / serialization of the state should noth be performed with Kryo's default + * This test should not fail, because de- / serialization of the state should not be performed with Kryo's default * {@link com.esotericsoftware.kryo.serializers.FieldSerializer}. 
*/ @Test diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskManagerServicesTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskManagerServicesTest.java index f47608cf966ac..0059866cd1ca8 100644 --- a/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskManagerServicesTest.java +++ b/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskManagerServicesTest.java @@ -174,7 +174,7 @@ public void calculateNetworkBufMixed() throws Exception { /** * Returns the value or the lower/upper bound in case the value is less/greater than the lower/upper bound, respectively. * - * @param value value to inspec + * @param value value to inspect * @param lower lower bound * @param upper upper bound * diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TestingTaskExecutorGateway.java b/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TestingTaskExecutorGateway.java index a9b676e33c786..94f325d43c436 100644 --- a/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TestingTaskExecutorGateway.java +++ b/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TestingTaskExecutorGateway.java @@ -104,7 +104,7 @@ public void heartbeatFromResourceManager(ResourceID heartbeatOrigin) { @Override public void disconnectJobManager(JobID jobId, Exception cause) { - // nooop + // noop } @Override diff --git a/flink-runtime/src/test/scala/org/apache/flink/runtime/testingUtils/TestingJobManagerMessages.scala b/flink-runtime/src/test/scala/org/apache/flink/runtime/testingUtils/TestingJobManagerMessages.scala index f79c1249bc713..bb16197e809b3 100644 --- a/flink-runtime/src/test/scala/org/apache/flink/runtime/testingUtils/TestingJobManagerMessages.scala +++ b/flink-runtime/src/test/scala/org/apache/flink/runtime/testingUtils/TestingJobManagerMessages.scala @@ -121,7 +121,7 @@ object TestingJobManagerMessages { */ case object NotifyWhenClientConnects /** - * Notifes of client connect + * Notifies of client connect */ case object ClientConnected /** diff --git a/flink-scala-shell/src/test/scala/org/apache/flink/api/scala/ScalaShellITCase.scala b/flink-scala-shell/src/test/scala/org/apache/flink/api/scala/ScalaShellITCase.scala index b4ccfaaae7da1..6148450368275 100644 --- a/flink-scala-shell/src/test/scala/org/apache/flink/api/scala/ScalaShellITCase.scala +++ b/flink-scala-shell/src/test/scala/org/apache/flink/api/scala/ScalaShellITCase.scala @@ -325,7 +325,7 @@ object ScalaShellITCase { @AfterClass def afterAll(): Unit = { - // The Scala interpreter somehow changes the class loader. Therfore, we have to reset it + // The Scala interpreter somehow changes the class loader. 
Therefore, we have to reset it Thread.currentThread().setContextClassLoader(classOf[ScalaShellITCase].getClassLoader) cluster.foreach(_.close) diff --git a/flink-scala/src/main/scala/org/apache/flink/api/scala/codegen/TypeAnalyzer.scala b/flink-scala/src/main/scala/org/apache/flink/api/scala/codegen/TypeAnalyzer.scala index 11d5ec7dbbb63..9f02706a31225 100644 --- a/flink-scala/src/main/scala/org/apache/flink/api/scala/codegen/TypeAnalyzer.scala +++ b/flink-scala/src/main/scala/org/apache/flink/api/scala/codegen/TypeAnalyzer.scala @@ -204,7 +204,7 @@ private[flink] trait TypeAnalyzer[C <: Context] { this: MacroContextHolder[C] } if (!hasZeroCtor) { - // We don't support POJOs without zero-paramter ctor + // We don't support POJOs without zero-parameter ctor return GenericClassDescriptor(id, tpe) } diff --git a/flink-scala/src/test/scala/org/apache/flink/api/scala/MaxByOperatorTest.scala b/flink-scala/src/test/scala/org/apache/flink/api/scala/MaxByOperatorTest.scala index 4266449a8a88b..4a7a3ff322df1 100644 --- a/flink-scala/src/test/scala/org/apache/flink/api/scala/MaxByOperatorTest.scala +++ b/flink-scala/src/test/scala/org/apache/flink/api/scala/MaxByOperatorTest.scala @@ -40,7 +40,7 @@ class MaxByOperatorTest { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. */ @Test(expected = classOf[IndexOutOfBoundsException]) def testOutOfTupleBoundsDataset1() { @@ -54,7 +54,7 @@ class MaxByOperatorTest { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. */ @Test(expected = classOf[IndexOutOfBoundsException]) def testOutOfTupleBoundsDataset2() { @@ -67,7 +67,7 @@ class MaxByOperatorTest { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. */ @Test(expected = classOf[IndexOutOfBoundsException]) def testOutOfTupleBoundsDataset3() { @@ -96,7 +96,7 @@ class MaxByOperatorTest { } /** - * This test validates that an InvalidProgrammException is thrown when maxBy + * This test validates that an InvalidProgramException is thrown when maxBy * is used on a custom data type. */ @Test(expected = classOf[InvalidProgramException]) @@ -110,7 +110,7 @@ class MaxByOperatorTest { } /** - * This test validates that an InvalidProgrammException is thrown when maxBy + * This test validates that an InvalidProgramException is thrown when maxBy * is used on a custom data type. */ @Test(expected = classOf[InvalidProgramException]) @@ -123,7 +123,7 @@ class MaxByOperatorTest { } /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. */ @Test(expected = classOf[IndexOutOfBoundsException]) def testOutOfTupleBoundsGrouping1() { @@ -135,7 +135,7 @@ class MaxByOperatorTest { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. */ @Test(expected = classOf[IndexOutOfBoundsException]) def testOutOfTupleBoundsGrouping2() { @@ -147,7 +147,7 @@ class MaxByOperatorTest { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. 
*/ @Test(expected = classOf[IndexOutOfBoundsException]) def testOutOfTupleBoundsGrouping3() { diff --git a/flink-scala/src/test/scala/org/apache/flink/api/scala/MinByOperatorTest.scala b/flink-scala/src/test/scala/org/apache/flink/api/scala/MinByOperatorTest.scala index 5e659ad467377..ca3e7f0d13033 100644 --- a/flink-scala/src/test/scala/org/apache/flink/api/scala/MinByOperatorTest.scala +++ b/flink-scala/src/test/scala/org/apache/flink/api/scala/MinByOperatorTest.scala @@ -39,7 +39,7 @@ class MinByOperatorTest { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. */ @Test(expected = classOf[IndexOutOfBoundsException]) def testOutOfTupleBoundsDataset1() { @@ -53,7 +53,7 @@ class MinByOperatorTest { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. */ @Test(expected = classOf[IndexOutOfBoundsException]) def testOutOfTupleBoundsDataset2() { @@ -66,7 +66,7 @@ class MinByOperatorTest { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. */ @Test(expected = classOf[IndexOutOfBoundsException]) def testOutOfTupleBoundsDataset3() { @@ -78,7 +78,7 @@ class MinByOperatorTest { } /** - * This test validates that an InvalidProgrammException is thrown when minBy + * This test validates that an InvalidProgramException is thrown when minBy * is used on a custom data type. */ @Test(expected = classOf[InvalidProgramException]) @@ -109,7 +109,7 @@ class MinByOperatorTest { } /** - * This test validates that an InvalidProgrammException is thrown when minBy + * This test validates that an InvalidProgramException is thrown when minBy * is used on a custom data type. */ @Test(expected = classOf[InvalidProgramException]) @@ -123,7 +123,7 @@ class MinByOperatorTest { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. */ @Test(expected = classOf[IndexOutOfBoundsException]) def testOutOfTupleBoundsGrouping1() { @@ -136,7 +136,7 @@ class MinByOperatorTest { /** * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. */ @Test(expected = classOf[IndexOutOfBoundsException]) def testOutOfTupleBoundsGrouping2() { @@ -149,7 +149,7 @@ class MinByOperatorTest { /**s * This test validates that an index which is out of bounds throws an - * IndexOutOfBOundsExcpetion. + * IndexOutOfBoundsException. */ @Test(expected = classOf[IndexOutOfBoundsException]) def testOutOfTupleBoundsGrouping3() { diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/CheckpointingMode.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/CheckpointingMode.java index f4597b8378f81..eeaf26d69b3ef 100644 --- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/CheckpointingMode.java +++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/CheckpointingMode.java @@ -28,7 +28,7 @@ * processing are repeated. 
For stateful operations and functions, the checkpointing mode defines * whether the system draws checkpoints such that a recovery behaves as if the operators/functions * see each record "exactly once" ({@link #EXACTLY_ONCE}), or whether the checkpoints are drawn - * in a simpler fashion that typically encounteres some duplicates upon recovery + * in a simpler fashion that typically encounters some duplicates upon recovery * ({@link #AT_LEAST_ONCE})
      */ @Public diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java index 4bbb123b5aaa3..c2ebdf483ed11 100644 --- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java +++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java @@ -611,7 +611,7 @@ public int getVersion() { // ------------------------------------------------------------------------ // Utility functions that implement the CoGroup logic based on the tagged - // untion window reduce + // union window reduce // ------------------------------------------------------------------------ private static class Input1Tagger implements MapFunction> { diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java index 28bd5c1707808..8f2531b9441e8 100644 --- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java +++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java @@ -424,7 +424,7 @@ public boolean isForceCheckpointing() { * *
      Shorthand for {@code getCheckpointConfig().getCheckpointingMode()}. * - * @return The checkpoin + * @return The checkpoint mode */ public CheckpointingMode getCheckpointingMode() { return checkpointCfg.getCheckpointingMode(); diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamConfig.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamConfig.java index 13100db01d837..63563f30684dc 100644 --- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamConfig.java +++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamConfig.java @@ -499,7 +499,7 @@ public TypeSerializer getStateKeySerializer(ClassLoader cl) { // ------------------------------------------------------------------------ - // Miscellansous + // Miscellaneous // ------------------------------------------------------------------------ public void setChainStart() { diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamGraphGenerator.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamGraphGenerator.java index 70b9fd45a8803..0a05f09eb5ac1 100644 --- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamGraphGenerator.java +++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamGraphGenerator.java @@ -558,7 +558,7 @@ private Collection transformOneInputTransform(OneInputTransfo /** * Transforms a {@code TwoInputTransformation}. * - *

      This recusively transforms the inputs, creates a new {@code StreamNode} in the graph and + *

      This recursively transforms the inputs, creates a new {@code StreamNode} in the graph and * wired the inputs to this new node. */ private Collection transformTwoInputTransform(TwoInputTransformation transform) { @@ -617,7 +617,7 @@ private Collection transformTwoInputTransform(TwoInputT * *

      If the user specifies a group name, this is taken as is. If nothing is specified and * the input operations all have the same group name then this name is taken. Otherwise the - * default group is choosen. + * default group is chosen. * * @param specifiedGroup The group specified by the user. * @param inputIds The IDs of the input operations. diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/HeapInternalTimerService.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/HeapInternalTimerService.java index 1b531aa200a28..b031dcf063825 100644 --- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/HeapInternalTimerService.java +++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/HeapInternalTimerService.java @@ -128,7 +128,7 @@ public HeapInternalTimerService( *

*
*     Setting the {@code keySerialized} and {@code namespaceSerializer} for the timers it will contain.
*     Setting the {@code triggerTarget} which contains the action to be performed when a timer fires.
- *     Re-registering timers that were retrieved after recoveting from a node failure, if any.
+ *     Re-registering timers that were retrieved after recovering from a node failure, if any.
*
      * This method can be called multiple times, as long as it is called with the same serializers. */ diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/async/queue/AsyncResult.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/async/queue/AsyncResult.java index 751de76a93b0a..2b31d51c3a062 100644 --- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/async/queue/AsyncResult.java +++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/async/queue/AsyncResult.java @@ -40,7 +40,7 @@ public interface AsyncResult { /** * True fi the async result is a collection of output elements; otherwise false. * - * @return True if the async reuslt is a collection of output elements; otherwise false + * @return True if the async result is a collection of output elements; otherwise false */ boolean isResultCollection(); diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/async/queue/UnorderedStreamElementQueue.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/async/queue/UnorderedStreamElementQueue.java index e2c3426f9ce5a..687ea7ef72d00 100644 --- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/async/queue/UnorderedStreamElementQueue.java +++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/async/queue/UnorderedStreamElementQueue.java @@ -219,7 +219,7 @@ public int size() { /** * Callback for onComplete events for the given stream element queue entry. Whenever a queue - * entry is completed, it is checked whether this entry belogns to the first set. If this is the + * entry is completed, it is checked whether this entry belongs to the first set. If this is the * case, then the element is added to the completed entries queue from where it can be consumed. * If the first set becomes empty, then the next set is polled from the uncompleted entries * queue. Completed entries from this new set are then added to the completed entries queue. diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/CoFeedbackTransformation.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/CoFeedbackTransformation.java index 28496fc31be23..0fc5d2ca610de 100644 --- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/CoFeedbackTransformation.java +++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/CoFeedbackTransformation.java @@ -61,7 +61,7 @@ public class CoFeedbackTransformation extends StreamTransformation { /** * Creates a new {@code CoFeedbackTransformation} from the given input. * - * @param parallelism The parallelism of the upstream {@code StreamTransformatino} and the + * @param parallelism The parallelism of the upstream {@code StreamTransformation} and the * feedback edges. * @param feedbackType The type of the feedback edges * @param waitTime The wait time of the feedback operator. 
After the time expires diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/OneInputTransformation.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/OneInputTransformation.java index c9362866eed92..b39ce27c6998e 100644 --- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/OneInputTransformation.java +++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/OneInputTransformation.java @@ -34,7 +34,7 @@ * {@link org.apache.flink.streaming.api.operators.OneInputStreamOperator} to one input * {@link org.apache.flink.streaming.api.transformations.StreamTransformation}. * - * @param The type of the elements in the nput {@code StreamTransformation} + * @param The type of the elements in the input {@code StreamTransformation} * @param The type of the elements that result from this {@code OneInputTransformation} */ @Internal diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSet.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSet.java index 10ac2a6a3fb7d..4c148e8ecd8f2 100644 --- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSet.java +++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSet.java @@ -207,7 +207,7 @@ public void merge(Collection toBeMerged, W mergeResult) { // don't merge the new window itself, it never had any state associated with it // i.e. if we are only merging one pre-existing window into itself - // without extending the pre-exising window + // without extending the pre-existing window if (!(mergedWindows.contains(mergeResult) && mergedWindows.size() == 1)) { mergeFunction.merge(mergeResult, mergedWindows, diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperator.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperator.java index cf606bc5427d1..aa1916818319c 100644 --- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperator.java +++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperator.java @@ -628,7 +628,7 @@ protected void deleteCleanupTimer(W window) { /** * Returns the cleanup time for a window, which is * {@code window.maxTimestamp + allowedLateness}. In - * case this leads to a value greated than {@link Long#MAX_VALUE} + * case this leads to a value greater than {@link Long#MAX_VALUE} * then a cleanup time of {@link Long#MAX_VALUE} is * returned. * diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/util/typeutils/FieldAccessor.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/util/typeutils/FieldAccessor.java index 21f62081b903e..3751670eb54a2 100644 --- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/util/typeutils/FieldAccessor.java +++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/util/typeutils/FieldAccessor.java @@ -47,7 +47,7 @@ * *

      Field expressions that specify nested fields (e.g. "f1.a.foo") result in nested field * accessors. These penetrate one layer, and then delegate the rest of the work to an - * "innerAccesor". (see PojoFieldAccessor, RecursiveTupleFieldAccessor, + * "innerAccessor". (see PojoFieldAccessor, RecursiveTupleFieldAccessor, * RecursiveProductFieldAccessor) */ @Internal diff --git a/flink-streaming-java/src/test/java/org/apache/flink/streaming/api/DataStreamTest.java b/flink-streaming-java/src/test/java/org/apache/flink/streaming/api/DataStreamTest.java index 68738ba8eb69e..b76ade741cde4 100644 --- a/flink-streaming-java/src/test/java/org/apache/flink/streaming/api/DataStreamTest.java +++ b/flink-streaming-java/src/test/java/org/apache/flink/streaming/api/DataStreamTest.java @@ -187,7 +187,7 @@ public Long map(Long value) throws Exception { assertTrue(edge.getPartitioner() instanceof ForwardPartitioner); } - // verify self union with differnt partitioners + // verify self union with different partitioners assertTrue(streamGraph.getStreamNode(selfUnionDifferentPartition.getId()).getInEdges().size() == 2); boolean hasForward = false; boolean hasBroadcast = false; diff --git a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSetTest.java b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSetTest.java index 019facabf5975..6ed4bf76506b7 100644 --- a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSetTest.java +++ b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSetTest.java @@ -208,7 +208,7 @@ public void testLateMerging() throws Exception { TestingMergeFunction mergeFunction = new TestingMergeFunction(); - // add several non-overlapping initial windoww + // add several non-overlapping initial windows mergeFunction.reset(); assertEquals(new TimeWindow(0, 3), windowSet.addWindow(new TimeWindow(0, 3), mergeFunction)); @@ -333,7 +333,7 @@ public void testMergeLargeWindowCoveringMultipleWindows() throws Exception { TestingMergeFunction mergeFunction = new TestingMergeFunction(); - // add several non-overlapping initial windoww + // add several non-overlapping initial windows mergeFunction.reset(); assertEquals(new TimeWindow(1, 3), windowSet.addWindow(new TimeWindow(1, 3), mergeFunction)); diff --git a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorContractTest.java b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorContractTest.java index bd263f6299624..30b8da99bbe3c 100644 --- a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorContractTest.java +++ b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorContractTest.java @@ -1468,7 +1468,7 @@ private void testWindowsAreMergedEagerly(final TimeDomainAdaptor timeAdaptor) th @Override public TriggerResult answer(InvocationOnMock invocation) throws Exception { Trigger.TriggerContext context = (Trigger.TriggerContext) invocation.getArguments()[3]; - // don't intefere with cleanup timers + // don't interfere with cleanup timers timeAdaptor.registerTimer(context, 0L); context.getPartitionedState(valueStateDescriptor).update("hello"); return TriggerResult.CONTINUE; @@ -1479,7 +1479,7 @@ public TriggerResult 
answer(InvocationOnMock invocation) throws Exception { @Override public TriggerResult answer(InvocationOnMock invocation) throws Exception { Trigger.OnMergeContext context = (Trigger.OnMergeContext) invocation.getArguments()[1]; - // don't intefere with cleanup timers + // don't interfere with cleanup timers timeAdaptor.registerTimer(context, 0L); context.getPartitionedState(valueStateDescriptor).update("hello"); return TriggerResult.CONTINUE; diff --git a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorMigrationTest.java b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorMigrationTest.java index d7df479094044..7c5767aa10ba1 100644 --- a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorMigrationTest.java +++ b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorMigrationTest.java @@ -191,7 +191,7 @@ public void testRestoreSessionWindowsWithCountTrigger() throws Exception { TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple3ResultSortComparator()); - // add an element that merges the two "key1" sessions, they should now have count 6, and therfore fire + // add an element that merges the two "key1" sessions, they should now have count 6, and therefore fire testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 10), 4500)); expectedOutput.add(new StreamRecord<>(new Tuple3<>("key1-22", 10L, 10000L), 9999L)); @@ -300,7 +300,7 @@ public void testRestoreSessionWindowsWithCountTriggerInMintCondition() throws Ex TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple3ResultSortComparator()); - // add an element that merges the two "key1" sessions, they should now have count 6, and therfore fire + // add an element that merges the two "key1" sessions, they should now have count 6, and therefore fire testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 10), 4500)); expectedOutput.add(new StreamRecord<>(new Tuple3<>("key1-22", 10L, 10000L), 9999L)); diff --git a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorTest.java b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorTest.java index acdf45a635b49..2fa1c3cdd2eb7 100644 --- a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorTest.java +++ b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorTest.java @@ -713,7 +713,7 @@ public void testSessionWindowsWithCountTrigger() throws Exception { TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple3ResultSortComparator()); - // add an element that merges the two "key1" sessions, they should now have count 6, and therfore fire + // add an element that merges the two "key1" sessions, they should now have count 6, and therefore fire testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 10), 4500)); expectedOutput.add(new StreamRecord<>(new Tuple3<>("key1-22", 10L, 10000L), 9999L)); diff --git a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTerminationTest.java 
b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTerminationTest.java index e3e51aa0d39ee..8bb1028029f7d 100644 --- a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTerminationTest.java +++ b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTerminationTest.java @@ -283,7 +283,7 @@ static class BlockingCallable implements Callable { @Override public OperatorStateHandle call() throws Exception { - // notify that we have started the asynchronous checkpointint operation + // notify that we have started the asynchronous checkpointed operation CHECKPOINTING_LATCH.trigger(); // wait until we have reached the StreamTask#cleanup --> This will already cancel this FutureTask CLEANUP_LATCH.await(); diff --git a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTest.java b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTest.java index 8ce8b03a61b6c..5059827350480 100644 --- a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTest.java +++ b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTest.java @@ -743,7 +743,7 @@ public Object answer(InvocationOnMock invocationOnMock) throws Throwable { } /** - * Tests that the StreamTask first closes alls its operators before setting its + * Tests that the StreamTask first closes all of its operators before setting its * state to not running (isRunning == false) * *

      See FLINK-7430. diff --git a/flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/CoGroupedStreams.scala b/flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/CoGroupedStreams.scala index 52c53d57a51af..101d3588c317f 100644 --- a/flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/CoGroupedStreams.scala +++ b/flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/CoGroupedStreams.scala @@ -75,7 +75,7 @@ class CoGroupedStreams[T1, T2](input1: DataStream[T1], input2: DataStream[T2]) { * A co-group operation that has [[KeySelector]]s defined for the first input. * * You need to specify a [[KeySelector]] for the second input using [[equalTo()]] - * before you can proceeed with specifying a [[WindowAssigner]] using [[EqualTo.window()]]. + * before you can proceed with specifying a [[WindowAssigner]] using [[EqualTo.window()]]. * * @tparam KEY Type of the key. This must be the same for both inputs */ diff --git a/flink-tests/src/test/java/org/apache/flink/test/classloading/jar/CheckpointedStreamingProgram.java b/flink-tests/src/test/java/org/apache/flink/test/classloading/jar/CheckpointedStreamingProgram.java index 51fad6b25e822..0503c935586d5 100644 --- a/flink-tests/src/test/java/org/apache/flink/test/classloading/jar/CheckpointedStreamingProgram.java +++ b/flink-tests/src/test/java/org/apache/flink/test/classloading/jar/CheckpointedStreamingProgram.java @@ -53,7 +53,7 @@ public static void main(String[] args) throws Exception { env.execute("Checkpointed Streaming Program"); } - // with Checkpoining + // with Checkpointing private static class SimpleStringGenerator implements SourceFunction, ListCheckpointed { public boolean running = true; diff --git a/flink-tests/src/test/java/org/apache/flink/test/classloading/jar/CustomKvStateProgram.java b/flink-tests/src/test/java/org/apache/flink/test/classloading/jar/CustomKvStateProgram.java index 819ad29c3d155..da334473f19f4 100644 --- a/flink-tests/src/test/java/org/apache/flink/test/classloading/jar/CustomKvStateProgram.java +++ b/flink-tests/src/test/java/org/apache/flink/test/classloading/jar/CustomKvStateProgram.java @@ -37,7 +37,7 @@ /** * A streaming program with a custom reducing KvState. * - *

      This is used to test proper usage of the user code class laoder when + *

      This is used to test proper usage of the user code class loader when * disposing savepoints. */ public class CustomKvStateProgram { diff --git a/flink-tests/src/test/java/org/apache/flink/test/operators/JoinITCase.java b/flink-tests/src/test/java/org/apache/flink/test/operators/JoinITCase.java index 38adb62d0221a..aa969098c818e 100644 --- a/flink-tests/src/test/java/org/apache/flink/test/operators/JoinITCase.java +++ b/flink-tests/src/test/java/org/apache/flink/test/operators/JoinITCase.java @@ -460,7 +460,7 @@ public Integer getKey(CustomType value) throws Exception { @Test public void testDefaultJoinOnTwoCustomTypeInputsWithInnerClassKeyExtractorsDisabledClosureCleaner() throws Exception { /* - * (Default) Join on two custom type inputs with key extractors, check if disableing closure cleaning works + * (Default) Join on two custom type inputs with key extractors, check if disabling closure cleaning works */ final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment(); diff --git a/flink-tests/src/test/java/org/apache/flink/test/runtime/leaderelection/ZooKeeperLeaderElectionITCase.java b/flink-tests/src/test/java/org/apache/flink/test/runtime/leaderelection/ZooKeeperLeaderElectionITCase.java index 1fbbdb252701a..75a885f6fff22 100644 --- a/flink-tests/src/test/java/org/apache/flink/test/runtime/leaderelection/ZooKeeperLeaderElectionITCase.java +++ b/flink-tests/src/test/java/org/apache/flink/test/runtime/leaderelection/ZooKeeperLeaderElectionITCase.java @@ -156,7 +156,7 @@ public void testJobExecutionOnClusterWithLeaderReelection() throws Exception { configuration.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, numSlotsPerTM); // we "effectively" disable the automatic RecoverAllJobs message and sent it manually to make - // sure that all TMs have registered to the JM prior to issueing the RecoverAllJobs message + // sure that all TMs have registered to the JM prior to issuing the RecoverAllJobs message configuration.setString(AkkaOptions.ASK_TIMEOUT, AkkaUtils.INF_TIMEOUT().toString()); Tasks.BlockingOnceReceiver$.MODULE$.blocking_$eq(true); diff --git a/flink-tests/src/test/java/org/apache/flink/test/streaming/runtime/TimestampITCase.java b/flink-tests/src/test/java/org/apache/flink/test/streaming/runtime/TimestampITCase.java index 0deda4b9c7265..5e08e8ae3d2cb 100644 --- a/flink-tests/src/test/java/org/apache/flink/test/streaming/runtime/TimestampITCase.java +++ b/flink-tests/src/test/java/org/apache/flink/test/streaming/runtime/TimestampITCase.java @@ -301,7 +301,7 @@ public void testDisabledTimestamps() throws Exception { /** * This tests whether timestamps are properly extracted in the timestamp - * extractor and whether watermarks are also correctly forwared from this with the auto watermark + * extractor and whether watermarks are also correctly forwarded from this with the auto watermark * interval. */ @Test @@ -363,7 +363,7 @@ public long extractAscendingTimestamp(Integer element) { } /** - * This thests whether timestamps are properly extracted in the timestamp + * This tests whether timestamps are properly extracted in the timestamp * extractor and whether watermark are correctly forwarded from the custom watermark emit * function. 
*/ diff --git a/flink-tests/src/test/java/org/apache/flink/test/util/CoordVector.java b/flink-tests/src/test/java/org/apache/flink/test/util/CoordVector.java index aadeaebb505f5..6d5df02e46819 100644 --- a/flink-tests/src/test/java/org/apache/flink/test/util/CoordVector.java +++ b/flink-tests/src/test/java/org/apache/flink/test/util/CoordVector.java @@ -27,7 +27,7 @@ /** * Implements a feature vector as a multi-dimensional point. Coordinates of that point * (= the features) are stored as double values. The distance between two feature vectors is - * the Euclidian distance between the points. + * the Euclidean distance between the points. */ public final class CoordVector implements Value, Comparable { private static final long serialVersionUID = 1L; @@ -82,14 +82,14 @@ public void setCoordinates(double[] coordinates) { } /** - * Computes the Euclidian distance between this coordinate vector and a + * Computes the Euclidean distance between this coordinate vector and a * second coordinate vector. * * @param cv The coordinate vector to which the distance is computed. - * @return The Euclidian distance to coordinate vector cv. If cv has a + * @return The Euclidean distance to coordinate vector cv. If cv has a * different length than this coordinate vector, -1 is returned. */ - public double computeEuclidianDistance(CoordVector cv) { + public double computeEuclideanDistance(CoordVector cv) { // check coordinate vector lengths if (cv.coordinates.length != this.coordinates.length) { return -1.0; diff --git a/flink-tests/src/test/java/org/apache/flink/test/windowing/sessionwindows/ParallelSessionsEventGenerator.java b/flink-tests/src/test/java/org/apache/flink/test/windowing/sessionwindows/ParallelSessionsEventGenerator.java index 56a95ce09d047..093a44e35e5a6 100644 --- a/flink-tests/src/test/java/org/apache/flink/test/windowing/sessionwindows/ParallelSessionsEventGenerator.java +++ b/flink-tests/src/test/java/org/apache/flink/test/windowing/sessionwindows/ParallelSessionsEventGenerator.java @@ -95,7 +95,7 @@ public E nextEvent() { final int index = i % subGeneratorLists.size(); EventGenerator subGenerator = subGeneratorLists.get(index); - // check if the sub-generator can produce an event under the current gloabl watermark + // check if the sub-generator can produce an event under the current global watermark if (subGenerator.canGenerateEventAtWatermark(globalWatermark)) { E event = subGenerator.generateEvent(globalWatermark); diff --git a/flink-tests/src/test/scala/org/apache/flink/api/scala/completeness/BatchScalaAPICompletenessTest.scala b/flink-tests/src/test/scala/org/apache/flink/api/scala/completeness/BatchScalaAPICompletenessTest.scala index 76e854778ba1e..6dc2ac5d85d4e 100644 --- a/flink-tests/src/test/scala/org/apache/flink/api/scala/completeness/BatchScalaAPICompletenessTest.scala +++ b/flink-tests/src/test/scala/org/apache/flink/api/scala/completeness/BatchScalaAPICompletenessTest.scala @@ -78,7 +78,7 @@ class BatchScalaAPICompletenessTest extends ScalaAPICompletenessTestBase { """^org\.apache\.flink\.api.java.*project""", // I don't want to have withParameters in the API since I consider Configuration to be - // deprecated. But maybe thats just me ... + // deprecated. But maybe that's just me ... """^org\.apache\.flink\.api.java.*withParameters""", // These are only used internally. 
Should be internal API but Java doesn't have diff --git a/flink-tests/src/test/scala/org/apache/flink/api/scala/operators/GroupingTest.scala b/flink-tests/src/test/scala/org/apache/flink/api/scala/operators/GroupingTest.scala index 8f7b8bb4cd164..3181a455847ee 100644 --- a/flink-tests/src/test/scala/org/apache/flink/api/scala/operators/GroupingTest.scala +++ b/flink-tests/src/test/scala/org/apache/flink/api/scala/operators/GroupingTest.scala @@ -71,7 +71,7 @@ class GroupingTest { val env = ExecutionEnvironment.getExecutionEnvironment val tupleDs = env.fromCollection(emptyTupleData) - // should not work, fiels position out of range + // should not work, field position out of range tupleDs.groupBy(5) } diff --git a/flink-yarn-tests/src/test/scala/org/apache/flink/yarn/TestingYarnTaskManager.scala b/flink-yarn-tests/src/test/scala/org/apache/flink/yarn/TestingYarnTaskManager.scala index 228eaaa0a74b0..f54528f0b89af 100644 --- a/flink-yarn-tests/src/test/scala/org/apache/flink/yarn/TestingYarnTaskManager.scala +++ b/flink-yarn-tests/src/test/scala/org/apache/flink/yarn/TestingYarnTaskManager.scala @@ -36,7 +36,7 @@ import org.apache.flink.runtime.testingUtils.TestingTaskManagerLike * @param config Configuration object for the actor * @param resourceID The Yarn container id * @param connectionInfo Connection information of this actor - * @param memoryManager MemoryManager which is responsibel for Flink's managed memory allocation + * @param memoryManager MemoryManager which is responsible for Flink's managed memory allocation * @param ioManager IOManager responsible for I/O * @param network NetworkEnvironment for this actor * @param numberOfSlots Number of slots for this TaskManager diff --git a/flink-yarn/src/test/java/org/apache/flink/yarn/YarnClusterDescriptorTest.java b/flink-yarn/src/test/java/org/apache/flink/yarn/YarnClusterDescriptorTest.java index 19d1af5a38d55..c11c4139a11c3 100644 --- a/flink-yarn/src/test/java/org/apache/flink/yarn/YarnClusterDescriptorTest.java +++ b/flink-yarn/src/test/java/org/apache/flink/yarn/YarnClusterDescriptorTest.java @@ -291,7 +291,7 @@ public void testSetupApplicationMasterContainer() { .getCommands().get(0)); // logback + log4j, with/out krb5, different JVM opts - // IMPORTANT: Beaware that we are using side effects here to modify the created YarnClusterDescriptor + // IMPORTANT: Be aware that we are using side effects here to modify the created YarnClusterDescriptor cfg.setString(CoreOptions.FLINK_JM_JVM_OPTIONS, jmJvmOpts); assertEquals( java + " " + jvmmem + @@ -322,7 +322,7 @@ public void testSetupApplicationMasterContainer() { .getCommands().get(0)); // now try some configurations with different yarn.container-start-command-template - // IMPORTANT: Beaware that we are using side effects here to modify the created YarnClusterDescriptor + // IMPORTANT: Be aware that we are using side effects here to modify the created YarnClusterDescriptor cfg.setString(ConfigConstants.YARN_CONTAINER_START_COMMAND_TEMPLATE, "%java% 1 %jvmmem% 2 %jvmopts% 3 %logging% 4 %class% 5 %args% 6 %redirects%"); assertEquals( @@ -341,7 +341,7 @@ public void testSetupApplicationMasterContainer() { cfg.setString(ConfigConstants.YARN_CONTAINER_START_COMMAND_TEMPLATE, "%java% %logging% %jvmopts% %jvmmem% %class% %args% %redirects%"); - // IMPORTANT: Beaware that we are using side effects here to modify the created YarnClusterDescriptor + // IMPORTANT: Be aware that we are using side effects here to modify the created YarnClusterDescriptor assertEquals( java + " " + logfile + " " + 
logback + " " + log4j + diff --git a/tools/create_release_files.sh b/tools/create_release_files.sh index 5d134e81ef3dd..3e7a6e75f0856 100755 --- a/tools/create_release_files.sh +++ b/tools/create_release_files.sh @@ -266,7 +266,7 @@ prepare make_source_release -# build dist by input parameter of "--scala-vervion xxx --hadoop-version xxx" +# build dist by input parameter of "--scala-version xxx --hadoop-version xxx" if [ "$SCALA_VERSION" == "none" ] && [ "$HADOOP_VERSION" == "none" ]; then make_binary_release "hadoop2" "" "2.11" make_binary_release "hadoop26" "-Dhadoop.version=2.6.5" "2.11" diff --git a/tools/list_deps.py b/tools/list_deps.py index aba92d5852aa7..125ce14bf78eb 100755 --- a/tools/list_deps.py +++ b/tools/list_deps.py @@ -23,7 +23,7 @@ # This lists all dependencies in the Maven Project root given as first # argument. If a dependency is included in several versions it is listed once -# for every version. The resul output is sorted. So this can be used +# for every version. The result output is sorted. So this can be used # to get a diff between the Maven dependencies of two versions of a project. path = sys.argv[1] diff --git a/tools/merge_flink_pr.py b/tools/merge_flink_pr.py index 76a7694dc9a53..1799a5512b46b 100755 --- a/tools/merge_flink_pr.py +++ b/tools/merge_flink_pr.py @@ -46,7 +46,7 @@ # Location of your FLINK git development area FLINK_HOME = os.environ.get("FLINK_HOME", "/home/patrick/Documents/spark") -# Remote name which points to the Gihub site +# Remote name which points to the Github site PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "apache-github") # Remote name which points to Apache git PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "apache")
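[Editor's note: for context on the computeEuclidianDistance -> computeEuclideanDistance rename in the CoordVector hunk above, a standalone sketch of that computation; the class below is illustrative only and not part of Flink:

    public final class EuclideanDistance {

        /**
         * Returns the Euclidean distance between two points, following the
         * CoordVector contract of returning -1 when the vector lengths differ.
         */
        public static double distance(double[] a, double[] b) {
            if (a.length != b.length) {
                return -1.0;
            }
            double sum = 0.0;
            for (int i = 0; i < a.length; i++) {
                double diff = a[i] - b[i];
                sum += diff * diff;
            }
            return Math.sqrt(sum);
        }

        public static void main(String[] args) {
            // Classic 3-4-5 triangle: prints 5.0.
            System.out.println(distance(new double[] {0.0, 3.0}, new double[] {4.0, 0.0}));
        }
    }
]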