Merge pull request IBM#2010 from Shopify/dnwe/misspell
chore: enable exportloopref and misspell linters
dnwe authored Sep 8, 2021
2 parents 16fb95c + a107d3b commit 1e3101e
Showing 8 changed files with 20 additions and 19 deletions.
3 changes: 2 additions & 1 deletion .golangci.yml
@@ -40,6 +40,7 @@ linters:
 - bodyclose
 - deadcode
 - depguard
+- exportloopref
 - dogsled
 # - dupl
 - errcheck
@@ -55,7 +56,7 @@ linters:
 # - gosimple
 - govet
 # - ineffassign
-# - misspell
+- misspell
 # - nakedret
 - nilerr
 # - scopelint
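For context: exportloopref reports references to a loop variable that escape the loop body (which, before Go 1.22's per-iteration loop variables, all alias a single variable), and misspell flags common misspellings in comments and strings, which is what the remaining files in this commit fix. A minimal illustration of the bug class exportloopref catches (not from this repository):

```go
package main

import "fmt"

func main() {
	values := []int{1, 2, 3}

	var refs []*int
	for _, v := range values {
		// exportloopref would flag this: &v escapes the loop, and before
		// Go 1.22 every element of refs points at the same loop variable.
		refs = append(refs, &v)
	}

	for _, r := range refs {
		fmt.Println(*r) // prints 3, 3, 3 rather than 1, 2, 3 on pre-1.22 Go
	}
}
```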
2 changes: 1 addition & 1 deletion async_producer.go
@@ -207,7 +207,7 @@ type ProducerMessage struct {
 // Partition is the partition that the message was sent to. This is only
 // guaranteed to be defined if the message was successfully delivered.
 Partition int32
-// Timestamp can vary in behaviour depending on broker configuration, being
+// Timestamp can vary in behavior depending on broker configuration, being
 // in either one of the CreateTime or LogAppendTime modes (default CreateTime),
 // and requiring version at least 0.10.0.
 //
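The Timestamp field touched above is only meaningful once a message has been acknowledged; with an AsyncProducer it can be read back from the Successes channel. A rough sketch (broker address and topic are placeholders), assuming Return.Successes is enabled:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_10_0_0          // timestamps need protocol 0.10.0+, per the doc comment
	cfg.Producer.Return.Successes = true    // deliveries are reported on Successes()

	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, cfg) // placeholder broker address
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	producer.Input() <- &sarama.ProducerMessage{
		Topic: "my-topic", // placeholder topic
		Value: sarama.StringEncoder("hello"),
	}

	msg := <-producer.Successes()
	// Whether this holds the client-set time or the broker's append time depends
	// on the topic's CreateTime / LogAppendTime setting, as the comment above notes.
	log.Printf("partition=%d offset=%d timestamp=%v", msg.Partition, msg.Offset, msg.Timestamp)
}
```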
4 changes: 2 additions & 2 deletions config_test.go
@@ -115,7 +115,7 @@ func TestNetConfigValidates(t *testing.T) {
 cfg.Net.SASL.Mechanism = SASLTypeSCRAMSHA256
 cfg.Net.SASL.SCRAMClientGeneratorFunc = nil
 cfg.Net.SASL.User = "user"
-cfg.Net.SASL.Password = "stong_password"
+cfg.Net.SASL.Password = "strong_password"
 },
 "A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc",
 },
@@ -126,7 +126,7 @@ func TestNetConfigValidates(t *testing.T) {
 cfg.Net.SASL.Mechanism = SASLTypeSCRAMSHA512
 cfg.Net.SASL.SCRAMClientGeneratorFunc = nil
 cfg.Net.SASL.User = "user"
-cfg.Net.SASL.Password = "stong_password"
+cfg.Net.SASL.Password = "strong_password"
 },
 "A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc",
 },
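The two test cases above assert that validation fails when a SCRAM mechanism is selected without a SCRAMClientGeneratorFunc. A sketch of the corresponding valid configuration; the no-op client below is a stand-in, and a real implementation would perform the SCRAM conversation (for example by wrapping xdg-go/scram, as Sarama's examples do):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

// noopSCRAM satisfies sarama.SCRAMClient for illustration only; a real client
// would carry out the SCRAM exchange with the broker.
type noopSCRAM struct{}

func (noopSCRAM) Begin(userName, password, authzID string) error { return nil }
func (noopSCRAM) Step(challenge string) (string, error)          { return "", nil }
func (noopSCRAM) Done() bool                                     { return true }

func main() {
	cfg := sarama.NewConfig()
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA256
	cfg.Net.SASL.User = "user"
	cfg.Net.SASL.Password = "strong_password"
	cfg.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return noopSCRAM{} }

	if err := cfg.Validate(); err != nil {
		log.Fatal(err) // with the generator set, this validation error no longer fires
	}
}
```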
4 changes: 2 additions & 2 deletions consumer.go
@@ -373,7 +373,7 @@ func (child *partitionConsumer) preferredBroker() (*Broker, error) {
 }
 }
 
-// if prefered replica cannot be found fallback to leader
+// if preferred replica cannot be found fallback to leader
 return child.consumer.client.Leader(child.topic, child.partition)
 }
 
@@ -845,7 +845,7 @@ func (bc *brokerConsumer) handleResponses() {
 if result == nil {
 if preferredBroker, err := child.preferredBroker(); err == nil {
 if bc.broker.ID() != preferredBroker.ID() {
-// not an error but needs redispatching to consume from prefered replica
+// not an error but needs redispatching to consume from preferred replica
 child.trigger <- none{}
 delete(bc.subscriptions, child)
 }
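The preferred-replica handling touched above is driven from the client side by the consumer's rack identifier: with RackID set (and brokers that support fetch-from-follower, Kafka 2.4+), the broker may steer the consumer to a closer replica, and this code falls back to the leader when that replica cannot be resolved. A hedged sketch of opting in (broker address, rack name, and topic are placeholders):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_4_0_0 // fetch-from-follower assumes Kafka 2.4+ semantics
	cfg.RackID = "us-east-1a"     // lets brokers point this consumer at its preferred replica

	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	pc, err := consumer.ConsumePartition("my-topic", 0, sarama.OffsetNewest)
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Close()

	msg := <-pc.Messages()
	log.Printf("offset=%d", msg.Offset)
}
```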
2 changes: 1 addition & 1 deletion consumer_group.go
@@ -30,7 +30,7 @@ type ConsumerGroup interface {
 // in a separate goroutine which requires it to be thread-safe. Any state must be carefully protected
 // from concurrent reads/writes.
 // 4. The session will persist until one of the ConsumeClaim() functions exits. This can be either when the
-// parent context is cancelled or when a server-side rebalance cycle is initiated.
+// parent context is canceled or when a server-side rebalance cycle is initiated.
 // 5. Once all the ConsumeClaim() loops have exited, the handler's Cleanup() hook is called
 // to allow the user to perform any final tasks before a rebalance.
 // 6. Finally, marked offsets are committed one last time before claims are released.
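The lifecycle described in that comment maps onto a handler roughly like the sketch below (group ID, topic, and broker address are placeholders): Setup and Cleanup bracket each session, and every ConsumeClaim runs in its own goroutine until the parent context is canceled or a rebalance begins.

```go
package main

import (
	"context"
	"log"

	"github.com/Shopify/sarama"
)

type exampleHandler struct{}

func (exampleHandler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (exampleHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (exampleHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	// Runs concurrently with other claims; returns when the session ends.
	for msg := range claim.Messages() {
		sess.MarkMessage(msg, "") // marked offsets are committed per the lifecycle above
	}
	return nil
}

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_0_0_0 // consumer groups need a protocol version >= 0.10.2; set it explicitly

	group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "example-group", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer group.Close()

	ctx := context.Background()
	for {
		// Consume blocks for one session; loop to rejoin after each rebalance.
		if err := group.Consume(ctx, []string{"my-topic"}, exampleHandler{}); err != nil {
			log.Fatal(err)
		}
		if ctx.Err() != nil {
			return
		}
	}
}
```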
18 changes: 9 additions & 9 deletions functional_test.go
@@ -60,7 +60,7 @@ func TestMain(m *testing.M) {
 //
 // In either case, the following topics will be deleted (if they exist) and
 // then created/pre-seeded with data for the functional test run:
-// * uncomitted-topic-test-4
+// * uncommitted-topic-test-4
 // * test.1
 // * test.4
 // * test.64
@@ -134,7 +134,7 @@ func prepareDockerTestEnvironment(ctx context.Context, env *testEnvironment) err
 c.Env = append(os.Environ(), fmt.Sprintf("CONFLUENT_PLATFORM_VERSION=%s", confluentPlatformVersion))
 err := c.Run()
 if err != nil {
-return fmt.Errorf("failed to run docker-compose to start test enviroment: %w", err)
+return fmt.Errorf("failed to run docker-compose to start test environment: %w", err)
 }
 
 // Set up toxiproxy Proxies
@@ -252,10 +252,10 @@ func tearDownDockerTestEnvironment(ctx context.Context, env *testEnvironment) er
 c.Stderr = os.Stderr
 rmErr := c.Run()
 if downErr != nil {
-return fmt.Errorf("failed to run docker-compose to stop test enviroment: %w", downErr)
+return fmt.Errorf("failed to run docker-compose to stop test environment: %w", downErr)
 }
 if rmErr != nil {
-return fmt.Errorf("failed to run docker-compose to rm test enviroment: %w", rmErr)
+return fmt.Errorf("failed to run docker-compose to rm test environment: %w", rmErr)
 }
 return nil
 }
@@ -342,14 +342,14 @@ func prepareTestTopics(ctx context.Context, env *testEnvironment) error {
 }
 
 // This is kind of gross, but we don't actually have support for doing transactional publishing
-// with sarama, so we need to use a java-based tool to publish uncomitted messages to
+// with sarama, so we need to use a java-based tool to publish uncommitted messages to
 // the uncommitted-topic-test-4 topic
 jarName := filepath.Base(uncomittedMsgJar)
 if _, err := os.Stat(jarName); err != nil {
 Logger.Printf("Downloading %s\n", uncomittedMsgJar)
 req, err := http.NewRequest("GET", uncomittedMsgJar, nil)
 if err != nil {
-return fmt.Errorf("failed creating requst for uncomitted msg jar: %w", err)
+return fmt.Errorf("failed creating requst for uncommitted msg jar: %w", err)
 }
 res, err := http.DefaultClient.Do(req)
 if err != nil {
@@ -358,13 +358,13 @@ func prepareTestTopics(ctx context.Context, env *testEnvironment) error {
 defer res.Body.Close()
 jarFile, err := os.OpenFile(jarName, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0o644)
 if err != nil {
-return fmt.Errorf("failed opening the uncomitted msg jar: %w", err)
+return fmt.Errorf("failed opening the uncommitted msg jar: %w", err)
 }
 defer jarFile.Close()
 
 _, err = io.Copy(jarFile, res.Body)
 if err != nil {
-return fmt.Errorf("failed writing the uncomitted msg jar: %w", err)
+return fmt.Errorf("failed writing the uncommitted msg jar: %w", err)
 }
 }
 
@@ -373,7 +373,7 @@ func prepareTestTopics(ctx context.Context, env *testEnvironment) error {
 c.Stderr = os.Stderr
 err = c.Run()
 if err != nil {
-return fmt.Errorf("failed running uncomitted msg jar: %w", err)
+return fmt.Errorf("failed running uncommitted msg jar: %w", err)
 }
 return nil
 }
4 changes: 2 additions & 2 deletions mockbroker.go
@@ -30,9 +30,9 @@ type RequestNotifierFunc func(bytesRead, bytesWritten int)
 // to facilitate testing of higher level or specialized consumers and producers
 // built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
 // but rather provides a facility to do that. It takes care of the TCP
-// transport, request unmarshalling, response marshalling, and makes it the test
+// transport, request unmarshalling, response marshaling, and makes it the test
 // writer responsibility to program correct according to the Kafka API protocol
-// MockBroker behaviour.
+// MockBroker behavior.
 //
 // MockBroker is implemented as a TCP server listening on a kernel-selected
 // localhost port that can accept many connections. It reads Kafka requests
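A rough sketch of how a test typically programs a MockBroker (topic name is a placeholder, and the exact set of requests a client issues can vary with its configuration and version):

```go
package example

import (
	"testing"

	"github.com/Shopify/sarama"
)

func TestMetadataAgainstMockBroker(t *testing.T) {
	mb := sarama.NewMockBroker(t, 1)
	defer mb.Close()

	// The test writer programs the broker's behavior: it answers only the
	// request types mapped here, per the comment above.
	mb.SetHandlerByMap(map[string]sarama.MockResponse{
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetBroker(mb.Addr(), mb.BrokerID()).
			SetLeader("my-topic", 0, mb.BrokerID()),
	})

	client, err := sarama.NewClient([]string{mb.Addr()}, sarama.NewConfig())
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	partitions, err := client.Partitions("my-topic")
	if err != nil {
		t.Fatal(err)
	}
	if len(partitions) != 1 {
		t.Fatalf("expected 1 partition, got %d", len(partitions))
	}
}
```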
2 changes: 1 addition & 1 deletion partitioner.go
@@ -169,7 +169,7 @@ func NewHashPartitioner(topic string) Partitioner {
 
 // NewReferenceHashPartitioner is like NewHashPartitioner except that it handles absolute values
 // in the same way as the reference Java implementation. NewHashPartitioner was supposed to do
-// that but it had a mistake and now there are people depending on both behaviours. This will
+// that but it had a mistake and now there are people depending on both behaviors. This will
 // all go away on the next major version bump.
 func NewReferenceHashPartitioner(topic string) Partitioner {
 p := new(hashPartitioner)
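A hedged sketch of opting into the reference absolute-value handling on a producer (broker address, topic, and key are placeholders); NewReferenceHashPartitioner matches the PartitionerConstructor signature, so it can be assigned to the config directly:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true // required by SyncProducer
	// Handle negative hash values the way the reference Java implementation
	// does, per the doc comment above.
	cfg.Producer.Partitioner = sarama.NewReferenceHashPartitioner

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "my-topic",
		Key:   sarama.StringEncoder("user-42"),
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delivered to partition %d at offset %d", partition, offset)
}
```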
