From 31886822ab95ccf89a07ec6ce844590357ed073d Mon Sep 17 00:00:00 2001 From: Pierre Grimaud Date: Sun, 19 Apr 2020 02:45:33 +0200 Subject: [PATCH] Fix typos --- acl_create_response.go | 2 +- alter_configs_response.go | 4 ++-- alter_partition_reassignments_request_test.go | 6 +++--- api_versions_response.go | 2 +- broker.go | 6 +++--- broker_test.go | 2 +- client_test.go | 2 +- examples/consumergroup/main.go | 2 +- list_partition_reassignments_request_test.go | 2 +- record_test.go | 2 +- sasl_authenticate_response_test.go | 2 +- tools/kafka-console-producer/README.md | 2 +- 12 files changed, 17 insertions(+), 17 deletions(-) diff --git a/acl_create_response.go b/acl_create_response.go index bc018ed00..14b1b9e13 100644 --- a/acl_create_response.go +++ b/acl_create_response.go @@ -2,7 +2,7 @@ package sarama import "time" -//CreateAclsResponse is a an acl reponse creation type +//CreateAclsResponse is an acl response creation type type CreateAclsResponse struct { ThrottleTime time.Duration AclCreationResponses []*AclCreationResponse diff --git a/alter_configs_response.go b/alter_configs_response.go index 99a526005..3266f9274 100644 --- a/alter_configs_response.go +++ b/alter_configs_response.go @@ -2,13 +2,13 @@ package sarama import "time" -//AlterConfigsResponse is a reponse type for alter config +//AlterConfigsResponse is a response type for alter config type AlterConfigsResponse struct { ThrottleTime time.Duration Resources []*AlterConfigsResourceResponse } -//AlterConfigsResourceResponse is a reponse type for alter config resource +//AlterConfigsResourceResponse is a response type for alter config resource type AlterConfigsResourceResponse struct { ErrorCode int16 ErrorMsg string diff --git a/alter_partition_reassignments_request_test.go b/alter_partition_reassignments_request_test.go index 8d282729d..c917f2d79 100644 --- a/alter_partition_reassignments_request_test.go +++ b/alter_partition_reassignments_request_test.go @@ -4,13 +4,13 @@ import "testing" var ( 
alterPartitionReassignmentsRequestNoBlock = []byte{ - 0, 0, 39, 16, // timout 10000 + 0, 0, 39, 16, // timeout 10000 1, // 1-1=0 blocks 0, // empty tagged fields } alterPartitionReassignmentsRequestOneBlock = []byte{ - 0, 0, 39, 16, // timout 10000 + 0, 0, 39, 16, // timeout 10000 2, // 2-1=1 block 6, 116, 111, 112, 105, 99, // topic name "topic" as compact string 2, // 2-1=1 partitions @@ -22,7 +22,7 @@ var ( } alterPartitionReassignmentsAbortRequest = []byte{ - 0, 0, 39, 16, // timout 10000 + 0, 0, 39, 16, // timeout 10000 2, // 2-1=1 block 6, 116, 111, 112, 105, 99, // topic name "topic" as compact string 2, // 2-1=1 partitions diff --git a/api_versions_response.go b/api_versions_response.go index 582e29db4..d09e8d9e1 100644 --- a/api_versions_response.go +++ b/api_versions_response.go @@ -1,6 +1,6 @@ package sarama -//ApiVersionsResponseBlock is an api version reponse block type +//ApiVersionsResponseBlock is an api version response block type type ApiVersionsResponseBlock struct { ApiKey int16 MinVersion int16 diff --git a/broker.go b/broker.go index 4f3991af7..90a1db981 100644 --- a/broker.go +++ b/broker.go @@ -73,7 +73,7 @@ const ( // server negotiate SASL by wrapping tokens with Kafka protocol headers. 
SASLHandshakeV1 = int16(1) // SASLExtKeyAuth is the reserved extension key name sent as part of the - // SASL/OAUTHBEARER intial client response + // SASL/OAUTHBEARER initial client response SASLExtKeyAuth = "auth" ) @@ -369,7 +369,7 @@ func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) { return response, nil } -//CommitOffset return an Offset commit reponse or error +//CommitOffset return an Offset commit response or error func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) { response := new(OffsetCommitResponse) @@ -1014,7 +1014,7 @@ func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int // When credentials are invalid, Kafka replies with a SaslAuthenticate response // containing an error code and message detailing the authentication failure. func (b *Broker) sendAndReceiveSASLPlainAuth() error { - // default to V0 to allow for backward compatability when SASL is enabled + // default to V0 to allow for backward compatibility when SASL is enabled // but not the handshake if b.conf.Net.SASL.Handshake { handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version) diff --git a/broker_test.go b/broker_test.go index e2b17462c..08fe77f7f 100644 --- a/broker_test.go +++ b/broker_test.go @@ -295,7 +295,7 @@ func TestSASLSCRAMSHAXXX(t *testing.T) { scramChallengeResp string }{ { - name: "SASL/SCRAMSHAXXX successfull authentication", + name: "SASL/SCRAMSHAXXX successful authentication", mockHandshakeErr: ErrNoError, scramClient: &MockSCRAMClient{}, scramChallengeResp: "pong", diff --git a/client_test.go b/client_test.go index cd47a500b..9eee454da 100644 --- a/client_test.go +++ b/client_test.go @@ -642,7 +642,7 @@ func TestClientController(t *testing.T) { } defer safeClose(t, client2) if _, err = client2.Controller(); err != ErrUnsupportedVersion { - t.Errorf("Expected Contoller() to return %s, found %s", ErrUnsupportedVersion, err) + t.Errorf("Expected 
Controller() to return %s, found %s", ErrUnsupportedVersion, err) } } diff --git a/examples/consumergroup/main.go b/examples/consumergroup/main.go index 776448f59..9a8b7cd8d 100644 --- a/examples/consumergroup/main.go +++ b/examples/consumergroup/main.go @@ -28,7 +28,7 @@ func init() { flag.StringVar(&brokers, "brokers", "", "Kafka bootstrap brokers to connect to, as a comma separated list") flag.StringVar(&group, "group", "", "Kafka consumer group definition") flag.StringVar(&version, "version", "2.1.1", "Kafka cluster version") - flag.StringVar(&topics, "topics", "", "Kafka topics to be consumed, as a comma seperated list") + flag.StringVar(&topics, "topics", "", "Kafka topics to be consumed, as a comma separated list") flag.StringVar(&assignor, "assignor", "range", "Consumer group partition assignment strategy (range, roundrobin, sticky)") flag.BoolVar(&oldest, "oldest", true, "Kafka consumer consume initial offset from oldest") flag.BoolVar(&verbose, "verbose", false, "Sarama logging") diff --git a/list_partition_reassignments_request_test.go b/list_partition_reassignments_request_test.go index d9f9f92ca..41a5b9cf1 100644 --- a/list_partition_reassignments_request_test.go +++ b/list_partition_reassignments_request_test.go @@ -4,7 +4,7 @@ import "testing" var ( listPartitionReassignmentsRequestOneBlock = []byte{ - 0, 0, 39, 16, // timout 10000 + 0, 0, 39, 16, // timeout 10000 2, // 2-1=1 block 6, 116, 111, 112, 105, 99, // topic name "topic" as compact string 2, // 2-1=1 partitions diff --git a/record_test.go b/record_test.go index 2756c5b25..1aceeda2c 100644 --- a/record_test.go +++ b/record_test.go @@ -283,7 +283,7 @@ func TestRecordBatchDecoding(t *testing.T) { r.length = varintLengthField{} } // The compression level is not restored on decoding. It is not needed - // anyway. We only set it here to ensure that comparision succeeds. + // anyway. We only set it here to ensure that comparison succeeds. 
batch.CompressionLevel = tc.batch.CompressionLevel if !reflect.DeepEqual(batch, tc.batch) { t.Errorf(spew.Sprintf("invalid decode of %s\ngot %+v\nwanted %+v", tc.name, batch, tc.batch)) diff --git a/sasl_authenticate_response_test.go b/sasl_authenticate_response_test.go index 04447966a..c555cfbfa 100644 --- a/sasl_authenticate_response_test.go +++ b/sasl_authenticate_response_test.go @@ -17,5 +17,5 @@ func TestSaslAuthenticateResponse(t *testing.T) { response.ErrorMessage = &msg response.SaslAuthBytes = []byte(`msg`) - testResponse(t, "authenticate reponse", response, saslAuthenticatResponseErr) + testResponse(t, "authenticate response", response, saslAuthenticatResponseErr) } diff --git a/tools/kafka-console-producer/README.md b/tools/kafka-console-producer/README.md index 6b3a65f21..7802b8bdf 100644 --- a/tools/kafka-console-producer/README.md +++ b/tools/kafka-console-producer/README.md @@ -25,7 +25,7 @@ A simple command line tool to produce a single message to Kafka. # Partitioning: by default, kafka-console-producer will partition as follows: # - manual partitioning if a -partition is provided # - hash partitioning by key if a -key is provided - # - random partioning otherwise. + # - random partitioning otherwise. # # You can override this using the -partitioner argument: echo "hello world" | kafka-console-producer -topic=test -key=key -partitioner=random