enable linter: revive (indent-error-flow) (#3068)
* enable linter: revive (indent-error-flow)

* lint
mmetc authored Jun 10, 2024
1 parent 7fd01ae commit 31ed9fb
Showing 10 changed files with 62 additions and 68 deletions.
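Revive's indent-error-flow rule flags an else block whose matching if branch always ends in a return: once the failure path returns early, the remaining code can be outdented instead of nested. Enabling the rule amounts to deleting its `disabled: true` entry from the revive section of .golangci.yml (the first change below); the Go files then apply the early-return rewrite, plus a few incidental errors.New / fmt.Errorf cleanups. A minimal before/after sketch, with made-up function names:

```go
// Hypothetical example, not taken from the repository.
package main

import (
	"errors"
	"fmt"
)

// before: flagged by indent-error-flow (the if branch always returns,
// so the else only adds indentation)
func beforeLint(s string) (int, error) {
	if s == "" {
		return 0, errors.New("empty input")
	} else {
		return len(s), nil
	}
}

// after: early return, no else, one less level of nesting
func afterLint(s string) (int, error) {
	if s == "" {
		return 0, errors.New("empty input")
	}

	return len(s), nil
}

func main() {
	fmt.Println(beforeLint("hello"))
	fmt.Println(afterLint(""))
}
```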
3 changes: 0 additions & 3 deletions .golangci.yml
@@ -70,7 +70,6 @@ linters-settings:
       - "!**/pkg/database/*.go"
       - "!**/pkg/exprhelpers/*.go"
       - "!**/pkg/acquisition/modules/appsec/appsec.go"
-      - "!**/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go"
       - "!**/pkg/apiserver/controllers/v1/errors.go"
     yaml:
       files:
@@ -147,8 +146,6 @@ linters-settings:
        disabled: true
      - name: increment-decrement
        disabled: true
-     - name: indent-error-flow
-       disabled: true
      - name: import-alias-naming
        disabled: true
      - name: import-shadowing
4 changes: 2 additions & 2 deletions pkg/acquisition/modules/appsec/rx_operator.go
@@ -50,9 +50,9 @@ func (o *rx) Evaluate(tx plugintypes.TransactionState, value string) bool {
             tx.CaptureField(i, c)
         }
         return true
-    } else {
-        return o.re.MatchString(value)
     }
+
+    return o.re.MatchString(value)
 }
 
 // RegisterRX registers the rx operator using a WASI implementation instead of Go.
22 changes: 11 additions & 11 deletions pkg/acquisition/modules/kinesis/kinesis.go
@@ -4,6 +4,7 @@ import (
     "bytes"
     "compress/gzip"
     "encoding/json"
+    "errors"
     "fmt"
     "io"
     "strings"
@@ -95,7 +96,7 @@ func (k *KinesisSource) newClient() error {
     }
 
     if sess == nil {
-        return fmt.Errorf("failed to create aws session")
+        return errors.New("failed to create aws session")
     }
     config := aws.NewConfig()
     if k.Config.AwsRegion != "" {
@@ -106,7 +107,7 @@ func (k *KinesisSource) newClient() error {
     }
     k.kClient = kinesis.New(sess, config)
     if k.kClient == nil {
-        return fmt.Errorf("failed to create kinesis client")
+        return errors.New("failed to create kinesis client")
     }
     return nil
 }
@@ -124,24 +125,24 @@ func (k *KinesisSource) UnmarshalConfig(yamlConfig []byte) error {
 
     err := yaml.UnmarshalStrict(yamlConfig, &k.Config)
     if err != nil {
-        return fmt.Errorf("Cannot parse kinesis datasource configuration: %w", err)
+        return fmt.Errorf("cannot parse kinesis datasource configuration: %w", err)
     }
 
     if k.Config.Mode == "" {
         k.Config.Mode = configuration.TAIL_MODE
     }
 
     if k.Config.StreamName == "" && !k.Config.UseEnhancedFanOut {
-        return fmt.Errorf("stream_name is mandatory when use_enhanced_fanout is false")
+        return errors.New("stream_name is mandatory when use_enhanced_fanout is false")
     }
     if k.Config.StreamARN == "" && k.Config.UseEnhancedFanOut {
-        return fmt.Errorf("stream_arn is mandatory when use_enhanced_fanout is true")
+        return errors.New("stream_arn is mandatory when use_enhanced_fanout is true")
     }
     if k.Config.ConsumerName == "" && k.Config.UseEnhancedFanOut {
-        return fmt.Errorf("consumer_name is mandatory when use_enhanced_fanout is true")
+        return errors.New("consumer_name is mandatory when use_enhanced_fanout is true")
     }
     if k.Config.StreamARN != "" && k.Config.StreamName != "" {
-        return fmt.Errorf("stream_arn and stream_name are mutually exclusive")
+        return errors.New("stream_arn and stream_name are mutually exclusive")
     }
     if k.Config.MaxRetries <= 0 {
         k.Config.MaxRetries = 10
@@ -169,7 +170,7 @@ func (k *KinesisSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsL
 }
 
 func (k *KinesisSource) ConfigureByDSN(string, map[string]string, *log.Entry, string) error {
-    return fmt.Errorf("kinesis datasource does not support command-line acquisition")
+    return errors.New("kinesis datasource does not support command-line acquisition")
 }
 
 func (k *KinesisSource) GetMode() string {
@@ -181,7 +182,7 @@ func (k *KinesisSource) GetName() string {
 }
 
 func (k *KinesisSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error {
-    return fmt.Errorf("kinesis datasource does not support one-shot acquisition")
+    return errors.New("kinesis datasource does not support one-shot acquisition")
 }
 
 func (k *KinesisSource) decodeFromSubscription(record []byte) ([]CloudwatchSubscriptionLogEvent, error) {
@@ -524,9 +525,8 @@ func (k *KinesisSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb)
         defer trace.CatchPanic("crowdsec/acquis/kinesis/streaming")
         if k.Config.UseEnhancedFanOut {
             return k.EnhancedRead(out, t)
-        } else {
-            return k.ReadFromStream(out, t)
         }
+        return k.ReadFromStream(out, t)
     })
     return nil
 }
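Alongside the else removal in StreamingAcquisition, this file replaces fmt.Errorf with errors.New wherever the message is a fixed string with no format verbs, and lowercases one error message. A hedged sketch of the same convention, with invented config fields and messages:

```go
// Illustrative only; the field names and messages are stand-ins.
package main

import (
	"errors"
	"fmt"
)

type config struct {
	StreamName string
	StreamARN  string
}

func validate(cfg config) error {
	if cfg.StreamName == "" && cfg.StreamARN == "" {
		// constant message: errors.New, no need for fmt.Errorf
		return errors.New("stream_name or stream_arn is required")
	}
	if cfg.StreamName != "" && cfg.StreamARN != "" {
		return errors.New("stream_arn and stream_name are mutually exclusive")
	}
	return nil
}

func configure(cfg config) error {
	if err := validate(cfg); err != nil {
		// dynamic message: fmt.Errorf with %w keeps the cause unwrappable
		return fmt.Errorf("cannot parse kinesis datasource configuration: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(configure(config{}))
	fmt.Println(configure(config{StreamName: "logs"}))
}
```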
25 changes: 11 additions & 14 deletions pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go
@@ -4,6 +4,7 @@ import (
     "context"
     "encoding/base64"
     "encoding/json"
+    "errors"
     "fmt"
     "io"
     "net/http"
@@ -13,7 +14,6 @@ import (
 
     "github.com/crowdsecurity/crowdsec/pkg/cwversion"
     "github.com/gorilla/websocket"
-    "github.com/pkg/errors"
     log "github.com/sirupsen/logrus"
     "gopkg.in/tomb.v2"
 )
@@ -120,34 +120,31 @@ func (lc *LokiClient) queryRange(ctx context.Context, uri string, c chan *LokiQu
             resp, err := lc.Get(uri)
             if err != nil {
                 if ok := lc.shouldRetry(); !ok {
-                    return errors.Wrapf(err, "error querying range")
-                } else {
-                    lc.increaseTicker(ticker)
-                    continue
+                    return fmt.Errorf("error querying range: %w", err)
                 }
+                lc.increaseTicker(ticker)
+                continue
             }
 
             if resp.StatusCode != http.StatusOK {
                 lc.Logger.Warnf("bad HTTP response code for query range: %d", resp.StatusCode)
                 body, _ := io.ReadAll(resp.Body)
                 resp.Body.Close()
                 if ok := lc.shouldRetry(); !ok {
-                    return errors.Wrapf(err, "bad HTTP response code: %d: %s", resp.StatusCode, string(body))
-                } else {
-                    lc.increaseTicker(ticker)
-                    continue
+                    return fmt.Errorf("bad HTTP response code: %d: %s: %w", resp.StatusCode, string(body), err)
                 }
+                lc.increaseTicker(ticker)
+                continue
             }
 
             var lq LokiQueryRangeResponse
             if err := json.NewDecoder(resp.Body).Decode(&lq); err != nil {
                 resp.Body.Close()
                 if ok := lc.shouldRetry(); !ok {
-                    return errors.Wrapf(err, "error decoding Loki response")
-                } else {
-                    lc.increaseTicker(ticker)
-                    continue
+                    return fmt.Errorf("error decoding Loki response: %w", err)
                 }
+                lc.increaseTicker(ticker)
+                continue
             }
             resp.Body.Close()
             lc.Logger.Tracef("Got response: %+v", lq)
@@ -261,7 +258,7 @@ func (lc *LokiClient) Tail(ctx context.Context) (chan *LokiResponse, error) {
 
     if err != nil {
         lc.Logger.Errorf("Error connecting to websocket, err: %s", err)
-        return responseChan, fmt.Errorf("error connecting to websocket")
+        return responseChan, errors.New("error connecting to websocket")
     }
 
     lc.t.Go(func() error {
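The queryRange hunk does two things at once: it swaps the archived github.com/pkg/errors for the standard library (fmt.Errorf with %w preserves the wrapped cause), and it flattens the retry branches so the non-retryable case returns immediately while the retry path falls through without an else. A rough, self-contained sketch of that control flow; fetchOnce and maxRetries are stand-ins, not the Loki client's API:

```go
// Illustrative control flow only; fetchOnce and maxRetries are invented.
package main

import (
	"errors"
	"fmt"
)

func fetchOnce(attempt int) error {
	if attempt < 2 {
		return errors.New("connection refused")
	}
	return nil
}

func fetchWithRetry(maxRetries int) error {
	for attempt := 0; ; attempt++ {
		err := fetchOnce(attempt)
		if err != nil {
			if attempt >= maxRetries {
				// non-retryable: wrap with %w instead of errors.Wrapf
				return fmt.Errorf("error querying range: %w", err)
			}
			// retryable: fall through, no else needed after the return above
			continue
		}
		return nil
	}
}

func main() {
	fmt.Println(fetchWithRetry(5)) // <nil>
	fmt.Println(fetchWithRetry(0)) // error querying range: connection refused
}
```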
21 changes: 11 additions & 10 deletions pkg/acquisition/modules/s3/s3.go
@@ -276,7 +276,7 @@ func extractBucketAndPrefixFromEventBridge(message *string) (string, string, err
     if eventBody.Detail.Bucket.Name != "" {
         return eventBody.Detail.Bucket.Name, eventBody.Detail.Object.Key, nil
     }
-    return "", "", fmt.Errorf("invalid event body for event bridge format")
+    return "", "", errors.New("invalid event body for event bridge format")
 }
 
 func extractBucketAndPrefixFromS3Notif(message *string) (string, string, error) {
@@ -286,7 +286,7 @@ func extractBucketAndPrefixFromS3Notif(message *string) (string, string, error)
         return "", "", err
     }
     if len(s3notifBody.Records) == 0 {
-        return "", "", fmt.Errorf("no records found in S3 notification")
+        return "", "", errors.New("no records found in S3 notification")
     }
     if !strings.HasPrefix(s3notifBody.Records[0].EventName, "ObjectCreated:") {
         return "", "", fmt.Errorf("event %s is not supported", s3notifBody.Records[0].EventName)
@@ -295,19 +295,20 @@ func extractBucketAndPrefixFromS3Notif(message *string) (string, string, error)
 }
 
 func (s *S3Source) extractBucketAndPrefix(message *string) (string, string, error) {
-    if s.Config.SQSFormat == SQSFormatEventBridge {
+    switch s.Config.SQSFormat {
+    case SQSFormatEventBridge:
         bucket, key, err := extractBucketAndPrefixFromEventBridge(message)
         if err != nil {
             return "", "", err
         }
         return bucket, key, nil
-    } else if s.Config.SQSFormat == SQSFormatS3Notification {
+    case SQSFormatS3Notification:
         bucket, key, err := extractBucketAndPrefixFromS3Notif(message)
         if err != nil {
             return "", "", err
         }
         return bucket, key, nil
-    } else {
+    default:
         bucket, key, err := extractBucketAndPrefixFromEventBridge(message)
         if err == nil {
             s.Config.SQSFormat = SQSFormatEventBridge
@@ -318,7 +319,7 @@ func (s *S3Source) extractBucketAndPrefix(message *string) (string, string, erro
             s.Config.SQSFormat = SQSFormatS3Notification
             return bucket, key, nil
         }
-        return "", "", fmt.Errorf("SQS message format not supported")
+        return "", "", errors.New("SQS message format not supported")
     }
 }
 
@@ -496,15 +497,15 @@ func (s *S3Source) UnmarshalConfig(yamlConfig []byte) error {
     }
 
     if s.Config.BucketName != "" && s.Config.SQSName != "" {
-        return fmt.Errorf("bucket_name and sqs_name are mutually exclusive")
+        return errors.New("bucket_name and sqs_name are mutually exclusive")
     }
 
     if s.Config.PollingMethod == PollMethodSQS && s.Config.SQSName == "" {
-        return fmt.Errorf("sqs_name is required when using sqs polling method")
+        return errors.New("sqs_name is required when using sqs polling method")
     }
 
     if s.Config.BucketName == "" && s.Config.PollingMethod == PollMethodList {
-        return fmt.Errorf("bucket_name is required")
+        return errors.New("bucket_name is required")
     }
 
     if s.Config.SQSFormat != "" && s.Config.SQSFormat != SQSFormatEventBridge && s.Config.SQSFormat != SQSFormatS3Notification {
@@ -567,7 +568,7 @@ func (s *S3Source) ConfigureByDSN(dsn string, labels map[string]string, logger *
     dsn = strings.TrimPrefix(dsn, "s3://")
     args := strings.Split(dsn, "?")
     if len(args[0]) == 0 {
-        return fmt.Errorf("empty s3:// DSN")
+        return errors.New("empty s3:// DSN")
     }
 
     if len(args) == 2 && len(args[1]) != 0 {
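extractBucketAndPrefix is the one spot where the if / else-if / else chain turns into a switch with a default case rather than a plain early return, since the three branches are peers. A compilable miniature of that shape; the constants, messages and return values are illustrative only:

```go
// Shape-only sketch; constants and messages are illustrative.
package main

import (
	"errors"
	"fmt"
)

const (
	formatEventBridge    = "eventbridge"
	formatS3Notification = "s3notification"
)

func routeMessage(format string) (string, error) {
	switch format {
	case formatEventBridge:
		return "parsed as EventBridge event", nil
	case formatS3Notification:
		return "parsed as S3 notification", nil
	default:
		// the real code tries to auto-detect the format here before giving up
		return "", errors.New("SQS message format not supported")
	}
}

func main() {
	fmt.Println(routeMessage(formatEventBridge))
	fmt.Println(routeMessage("unknown"))
}
```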
7 changes: 4 additions & 3 deletions pkg/cache/cache.go
@@ -111,17 +111,18 @@ func SetKey(cacheName string, key string, value string, expiration *time.Duratio
 func GetKey(cacheName string, key string) (string, error) {
     for i, name := range CacheNames {
         if name == cacheName {
-            if value, err := Caches[i].Get(key); err != nil {
+            value, err := Caches[i].Get(key)
+            if err != nil {
                 // do not warn or log if key not found
                 if errors.Is(err, gcache.KeyNotFoundError) {
                     return "", nil
                 }
                 CacheConfig[i].Logger.Warningf("While getting key %s in cache %s: %s", key, cacheName, err)
 
                 return "", err
-            } else {
-                return value.(string), nil
             }
+
+            return value.(string), nil
         }
     }
 
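GetKey needed a slightly different fix: a value bound in `if value, err := ...; err != nil` is scoped to the if statement, which is what forced the else. Declaring value and err on their own line lets the success return sit after the error handling. A self-contained sketch with a plain map standing in for the gcache-backed store:

```go
// Simplified stand-in for the gcache-backed cache; not the package's real API.
package main

import (
	"errors"
	"fmt"
)

var errKeyNotFound = errors.New("key not found")

func lookup(cache map[string]string, key string) (string, error) {
	v, ok := cache[key]
	if !ok {
		return "", errKeyNotFound
	}
	return v, nil
}

// getKey mirrors the refactored shape: assign value and err first, handle the
// error with early returns, and keep the success return un-indented.
func getKey(cache map[string]string, key string) (string, error) {
	value, err := lookup(cache, key)
	if err != nil {
		// a missing key is expected, report it as an empty value
		if errors.Is(err, errKeyNotFound) {
			return "", nil
		}

		return "", err
	}

	return value, nil
}

func main() {
	cache := map[string]string{"captcha_state": "ok"}
	fmt.Println(getKey(cache, "captcha_state"))
	fmt.Println(getKey(cache, "missing"))
}
```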
3 changes: 1 addition & 2 deletions pkg/hubtest/hubtest_item.go
@@ -645,7 +645,6 @@ func (t *HubTestItem) Run() error {
         return t.RunWithLogFile()
     } else if t.Config.NucleiTemplate != "" {
         return t.RunWithNucleiTemplate()
-    } else {
-        return fmt.Errorf("log file or nuclei template must be set in '%s'", t.Name)
     }
+    return fmt.Errorf("log file or nuclei template must be set in '%s'", t.Name)
 }
32 changes: 16 additions & 16 deletions pkg/leakybucket/reset_filter.go
@@ -82,22 +82,22 @@ func (u *CancelOnFilter) OnBucketInit(bucketFactory *BucketFactory) error {
cancelExprCacheLock.Unlock()
u.CancelOnFilter = compiled.CancelOnFilter
return nil
} else {
cancelExprCacheLock.Unlock()
//release the lock during compile
}

compiledExpr.CancelOnFilter, err = expr.Compile(bucketFactory.CancelOnFilter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
if err != nil {
bucketFactory.logger.Errorf("reset_filter compile error : %s", err)
return err
}
u.CancelOnFilter = compiledExpr.CancelOnFilter
if bucketFactory.Debug {
u.Debug = true
}
cancelExprCacheLock.Lock()
cancelExprCache[bucketFactory.CancelOnFilter] = compiledExpr
cancelExprCacheLock.Unlock()
cancelExprCacheLock.Unlock()
//release the lock during compile

compiledExpr.CancelOnFilter, err = expr.Compile(bucketFactory.CancelOnFilter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
if err != nil {
bucketFactory.logger.Errorf("reset_filter compile error : %s", err)
return err
}
return err
u.CancelOnFilter = compiledExpr.CancelOnFilter
if bucketFactory.Debug {
u.Debug = true
}
cancelExprCacheLock.Lock()
cancelExprCache[bucketFactory.CancelOnFilter] = compiledExpr
cancelExprCacheLock.Unlock()
return nil
}
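This hunk looks larger than it is: removing the else re-indents the whole compile-and-cache block, so most lines are deleted and re-added with one less indentation level. Roughly, the flow is: check the compiled-expression cache under a lock and return early on a hit, otherwise compile with the lock released and store the result afterwards. A simplified sketch under those assumptions, with a plain map and mutex standing in for the real expr cache:

```go
// Simplified sketch; a map and mutex stand in for the expr cache.
package main

import (
	"fmt"
	"strings"
	"sync"
)

var (
	cacheLock sync.Mutex
	cache     = map[string]func(string) bool{}
)

func compile(filter string) func(string) bool {
	return func(s string) bool { return strings.Contains(s, filter) }
}

func getFilter(expr string) func(string) bool {
	cacheLock.Lock()
	if compiled, ok := cache[expr]; ok {
		cacheLock.Unlock()
		return compiled // early return replaces the old else block
	}
	cacheLock.Unlock()

	// release the lock during compile
	compiled := compile(expr)

	cacheLock.Lock()
	cache[expr] = compiled
	cacheLock.Unlock()

	return compiled
}

func main() {
	f := getFilter("abc")
	fmt.Println(f("xabcy"), f("nope"))
}
```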
6 changes: 2 additions & 4 deletions pkg/leakybucket/uniq.go
@@ -39,11 +39,9 @@ func (u *Uniq) OnBucketPour(bucketFactory *BucketFactory) func(types.Event, *Lea
             leaky.logger.Debugf("Uniq(%s) : ok", element)
             u.KeyCache[element] = true
             return &msg
-
-        } else {
-            leaky.logger.Debugf("Uniq(%s) : ko, discard event", element)
-            return nil
         }
+        leaky.logger.Debugf("Uniq(%s) : ko, discard event", element)
+        return nil
     }
 }
 
7 changes: 4 additions & 3 deletions pkg/types/event.go
@@ -63,11 +63,12 @@ func (e *Event) SetParsed(key string, value string) bool {
 }
 
 func (e *Event) GetType() string {
-    if e.Type == OVFLW {
+    switch e.Type {
+    case OVFLW:
         return "overflow"
-    } else if e.Type == LOG {
+    case LOG:
         return "log"
-    } else {
+    default:
         log.Warningf("unknown event type for %+v", e)
         return "unknown"
     }
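GetType gets the same treatment as the SQS-format dispatch in s3.go: a switch over the event type with a default branch for unknown values. A compilable miniature; the constants here are local stand-ins for the package's own:

```go
// Stand-in types and constants, not the ones from pkg/types.
package main

import "fmt"

type eventType int

const (
	LOG eventType = iota
	OVFLW
)

type event struct{ Type eventType }

func (e *event) GetType() string {
	switch e.Type {
	case OVFLW:
		return "overflow"
	case LOG:
		return "log"
	default:
		// the real code also logs a warning here
		return "unknown"
	}
}

func main() {
	fmt.Println((&event{Type: OVFLW}).GetType())
	fmt.Println((&event{Type: 42}).GetType())
}
```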
