From d1f62263ef47552260852da981b3dacefff215cd Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Fri, 2 Feb 2024 10:08:34 +0100 Subject: [PATCH 001/119] add /usage-metrics endpoint definition --- pkg/models/localapi_swagger.yaml | 162 +++++++++++++++++++++++++++++++ 1 file changed, 162 insertions(+) diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index 66132e5e36e..528463e4502 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -684,6 +684,36 @@ paths: $ref: "#/definitions/ErrorResponse" security: - JWTAuthorizer: [] + /usage-metrics: + post: + description: Post usage metrics from a LP or a bouncer + summary: Send usage metrics + tags: + - bouncers + - watchers + operationId: usage-metrics + produces: + - application/json + parameters: + - name: body + in: body + required: true + schema: + $ref: '#/definitions/AllMetrics' + description: 'All metrics' + responses: + '200': + description: successful operation + schema: + $ref: '#/definitions/SuccessResponse' + headers: {} + '400': + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - APIKeyAuthorizer: [] + - JWTAuthorizer: [] definitions: WatcherRegistrationRequest: title: WatcherRegistrationRequest @@ -994,6 +1024,128 @@ definitions: type: string value: type: string + RemediationComponentsMetrics: + title: RemediationComponentsMetrics + type: array + maxItems: 1 + items: + allOf: + - $ref: '#/definitions/BaseMetrics' + - type: object + properties: + type: + type: string + description: type of the remediation component + LogProcessorsMetrics: + title: LogProcessorsMetrics + type: array + maxItems: 1 + items: + allOf: + - $ref: '#/definitions/BaseMetrics' + - type: object + properties: + console_options: + $ref: '#/definitions/ConsoleOptions' + datasources: + type: object + description: Number of datasources per type + additionalProperties: + type: integer + AllMetrics: + title: AllMetrics + type: object + 
properties: + remediation_components: + type: array + items: + $ref: '#/definitions/RemediationComponentsMetrics' + description: remediation components metrics + log_processors: + type: array + items: + $ref: '#/definitions/LogProcessorsMetrics' + description: log processors metrics + BaseMetrics: + title: BaseMetrics + type: object + properties: + version: + type: string + description: version of the remediation component + meta: + type: object + $ref: '#/definitions/MetricsMeta' + description: metrics meta + os: + type: object + $ref: '#/definitions/OSversion' + description: OS information + metrics: + type: array + items: + $ref: '#/definitions/MetricsDetailItem' + description: metrics details + feature_flags: + type: array + items: + type: string + description: feature flags (expected to be empty for remediation components) + required: + - version + - os + - meta + OSversion: + title: OSversion + type: object + properties: + name: + type: string + description: name of the OS + version: + type: string + description: version of the OS + MetricsDetailItem: + title: MetricsDetailItem + type: object + properties: + name: + type: string + description: name of the metric + value: + type: number + description: value of the metric + unit: + type: string + description: unit of the metric + labels: + $ref: '#/definitions/MetricsLabels' + description: labels of the metric + MetricsMeta: + title: MetricsMeta + type: object + properties: + window_size_seconds: + type: integer + description: Size, in seconds, of the window used to compute the metric + utc_startup_timestamp: + type: number + description: UTC timestamp of the startup of the software + utc_now_timestamp: + type: number + description: UTC timestamp of the current time + MetricsLabels: + title: MetricsLabels + type: object + additionalProperties: + type: string + description: label of the metric + ConsoleOptions: + title: ConsoleOptions + type: array + items: + type: string + description: enabled console options 
ErrorResponse: type: "object" required: @@ -1007,6 +1159,16 @@ definitions: description: "more detail on individual errors" title: "error response" description: "error response return by the API" + SuccessResponse: + type: "object" + required: + - "message" + properties: + message: + type: "string" + description: "message" + title: "success response" + description: "success response return by the API" tags: - name: bouncers description: 'Operations about decisions : bans, captcha, rate-limit etc.' From 76c04fdd829e5375fe392c327183cfdaf16ff012 Mon Sep 17 00:00:00 2001 From: marco Date: Thu, 8 Feb 2024 11:16:17 +0100 Subject: [PATCH 002/119] regenerate pkg/models --- pkg/models/all_metrics.go | 159 ++++++++++++ pkg/models/base_metrics.go | 240 +++++++++++++++++++ pkg/models/console_options.go | 27 +++ pkg/models/log_processors_metrics.go | 239 ++++++++++++++++++ pkg/models/metrics_detail_item.go | 115 +++++++++ pkg/models/metrics_labels.go | 27 +++ pkg/models/metrics_meta.go | 56 +++++ pkg/models/o_sversion.go | 53 ++++ pkg/models/remediation_components_metrics.go | 188 +++++++++++++++ pkg/models/success_response.go | 73 ++++++ 10 files changed, 1177 insertions(+) create mode 100644 pkg/models/all_metrics.go create mode 100644 pkg/models/base_metrics.go create mode 100644 pkg/models/console_options.go create mode 100644 pkg/models/log_processors_metrics.go create mode 100644 pkg/models/metrics_detail_item.go create mode 100644 pkg/models/metrics_labels.go create mode 100644 pkg/models/metrics_meta.go create mode 100644 pkg/models/o_sversion.go create mode 100644 pkg/models/remediation_components_metrics.go create mode 100644 pkg/models/success_response.go diff --git a/pkg/models/all_metrics.go b/pkg/models/all_metrics.go new file mode 100644 index 00000000000..27f4ead8cd4 --- /dev/null +++ b/pkg/models/all_metrics.go @@ -0,0 +1,159 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// AllMetrics AllMetrics +// +// swagger:model AllMetrics +type AllMetrics struct { + + // log processors metrics + LogProcessors []LogProcessorsMetrics `json:"log_processors"` + + // remediation components metrics + RemediationComponents []RemediationComponentsMetrics `json:"remediation_components"` +} + +// Validate validates this all metrics +func (m *AllMetrics) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateLogProcessors(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRemediationComponents(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *AllMetrics) validateLogProcessors(formats strfmt.Registry) error { + if swag.IsZero(m.LogProcessors) { // not required + return nil + } + + for i := 0; i < len(m.LogProcessors); i++ { + + if err := m.LogProcessors[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("log_processors" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("log_processors" + "." + strconv.Itoa(i)) + } + return err + } + + } + + return nil +} + +func (m *AllMetrics) validateRemediationComponents(formats strfmt.Registry) error { + if swag.IsZero(m.RemediationComponents) { // not required + return nil + } + + for i := 0; i < len(m.RemediationComponents); i++ { + + if err := m.RemediationComponents[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("remediation_components" + "." 
+ strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("remediation_components" + "." + strconv.Itoa(i)) + } + return err + } + + } + + return nil +} + +// ContextValidate validate this all metrics based on the context it is used +func (m *AllMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateLogProcessors(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateRemediationComponents(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *AllMetrics) contextValidateLogProcessors(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.LogProcessors); i++ { + + if err := m.LogProcessors[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("log_processors" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("log_processors" + "." + strconv.Itoa(i)) + } + return err + } + + } + + return nil +} + +func (m *AllMetrics) contextValidateRemediationComponents(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.RemediationComponents); i++ { + + if err := m.RemediationComponents[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("remediation_components" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("remediation_components" + "." 
+ strconv.Itoa(i)) + } + return err + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *AllMetrics) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AllMetrics) UnmarshalBinary(b []byte) error { + var res AllMetrics + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/base_metrics.go b/pkg/models/base_metrics.go new file mode 100644 index 00000000000..88ededa6b71 --- /dev/null +++ b/pkg/models/base_metrics.go @@ -0,0 +1,240 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BaseMetrics BaseMetrics +// +// swagger:model BaseMetrics +type BaseMetrics struct { + + // feature flags (expected to be empty for remediation components) + FeatureFlags []string `json:"feature_flags"` + + // metrics meta + // Required: true + Meta *MetricsMeta `json:"meta"` + + // metrics details + Metrics []*MetricsDetailItem `json:"metrics"` + + // OS information + // Required: true + Os *OSversion `json:"os"` + + // version of the remediation component + // Required: true + Version *string `json:"version"` +} + +// Validate validates this base metrics +func (m *BaseMetrics) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMeta(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMetrics(formats); err != nil { + res = append(res, err) + } + + if err := m.validateOs(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVersion(formats); err != nil { + res = append(res, err) + } + + if 
len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BaseMetrics) validateMeta(formats strfmt.Registry) error { + + if err := validate.Required("meta", "body", m.Meta); err != nil { + return err + } + + if m.Meta != nil { + if err := m.Meta.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("meta") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("meta") + } + return err + } + } + + return nil +} + +func (m *BaseMetrics) validateMetrics(formats strfmt.Registry) error { + if swag.IsZero(m.Metrics) { // not required + return nil + } + + for i := 0; i < len(m.Metrics); i++ { + if swag.IsZero(m.Metrics[i]) { // not required + continue + } + + if m.Metrics[i] != nil { + if err := m.Metrics[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("metrics" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("metrics" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *BaseMetrics) validateOs(formats strfmt.Registry) error { + + if err := validate.Required("os", "body", m.Os); err != nil { + return err + } + + if m.Os != nil { + if err := m.Os.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("os") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("os") + } + return err + } + } + + return nil +} + +func (m *BaseMetrics) validateVersion(formats strfmt.Registry) error { + + if err := validate.Required("version", "body", m.Version); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this base metrics based on the context it is used +func (m *BaseMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateMeta(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateMetrics(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateOs(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *BaseMetrics) contextValidateMeta(ctx context.Context, formats strfmt.Registry) error { + + if m.Meta != nil { + + if err := m.Meta.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("meta") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("meta") + } + return err + } + } + + return nil +} + +func (m *BaseMetrics) contextValidateMetrics(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Metrics); i++ { + + if m.Metrics[i] != nil { + + if swag.IsZero(m.Metrics[i]) { // not required + return nil + } + + if err := m.Metrics[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("metrics" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("metrics" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *BaseMetrics) contextValidateOs(ctx context.Context, formats strfmt.Registry) error { + + if m.Os != nil { + + if err := m.Os.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("os") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("os") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BaseMetrics) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BaseMetrics) UnmarshalBinary(b []byte) error { + var res BaseMetrics + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/console_options.go b/pkg/models/console_options.go new file mode 100644 index 00000000000..87983ab1762 --- /dev/null +++ b/pkg/models/console_options.go @@ -0,0 +1,27 @@ +// Code generated by 
go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" +) + +// ConsoleOptions ConsoleOptions +// +// swagger:model ConsoleOptions +type ConsoleOptions []string + +// Validate validates this console options +func (m ConsoleOptions) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this console options based on context it is used +func (m ConsoleOptions) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} diff --git a/pkg/models/log_processors_metrics.go b/pkg/models/log_processors_metrics.go new file mode 100644 index 00000000000..10b6491d28a --- /dev/null +++ b/pkg/models/log_processors_metrics.go @@ -0,0 +1,239 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// LogProcessorsMetrics LogProcessorsMetrics +// +// swagger:model LogProcessorsMetrics +type LogProcessorsMetrics []*LogProcessorsMetricsItems0 + +// Validate validates this log processors metrics +func (m LogProcessorsMetrics) Validate(formats strfmt.Registry) error { + var res []error + + iLogProcessorsMetricsSize := int64(len(m)) + + if err := validate.MaxItems("", "body", iLogProcessorsMetricsSize, 1); err != nil { + return err + } + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := 
err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this log processors metrics based on the context it is used +func (m LogProcessorsMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// LogProcessorsMetricsItems0 log processors metrics items0 +// +// swagger:model LogProcessorsMetricsItems0 +type LogProcessorsMetricsItems0 struct { + BaseMetrics + + // console options + ConsoleOptions ConsoleOptions `json:"console_options,omitempty"` + + // Number of datasources per type + Datasources map[string]int64 `json:"datasources,omitempty"` +} + +// UnmarshalJSON unmarshals this object from a JSON structure +func (m *LogProcessorsMetricsItems0) UnmarshalJSON(raw []byte) error { + // AO0 + var aO0 BaseMetrics + if err := swag.ReadJSON(raw, &aO0); err != nil { + return err + } + m.BaseMetrics = aO0 + + // AO1 + var dataAO1 struct { + ConsoleOptions ConsoleOptions `json:"console_options,omitempty"` + + Datasources map[string]int64 `json:"datasources,omitempty"` + } + if err := swag.ReadJSON(raw, &dataAO1); err != nil { + return err + } + + m.ConsoleOptions = dataAO1.ConsoleOptions + + m.Datasources = dataAO1.Datasources + + return nil +} + +// MarshalJSON marshals this object to a JSON structure +func (m LogProcessorsMetricsItems0) MarshalJSON() ([]byte, error) { + _parts := 
make([][]byte, 0, 2) + + aO0, err := swag.WriteJSON(m.BaseMetrics) + if err != nil { + return nil, err + } + _parts = append(_parts, aO0) + var dataAO1 struct { + ConsoleOptions ConsoleOptions `json:"console_options,omitempty"` + + Datasources map[string]int64 `json:"datasources,omitempty"` + } + + dataAO1.ConsoleOptions = m.ConsoleOptions + + dataAO1.Datasources = m.Datasources + + jsonDataAO1, errAO1 := swag.WriteJSON(dataAO1) + if errAO1 != nil { + return nil, errAO1 + } + _parts = append(_parts, jsonDataAO1) + return swag.ConcatJSON(_parts...), nil +} + +// Validate validates this log processors metrics items0 +func (m *LogProcessorsMetricsItems0) Validate(formats strfmt.Registry) error { + var res []error + + // validation for a type composition with BaseMetrics + if err := m.BaseMetrics.Validate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConsoleOptions(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *LogProcessorsMetricsItems0) validateConsoleOptions(formats strfmt.Registry) error { + + if swag.IsZero(m.ConsoleOptions) { // not required + return nil + } + + if err := m.ConsoleOptions.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("console_options") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("console_options") + } + return err + } + + return nil +} + +// ContextValidate validate this log processors metrics items0 based on the context it is used +func (m *LogProcessorsMetricsItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + // validation for a type composition with BaseMetrics + if err := m.BaseMetrics.ContextValidate(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConsoleOptions(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *LogProcessorsMetricsItems0) contextValidateConsoleOptions(ctx context.Context, formats strfmt.Registry) error { + + if err := m.ConsoleOptions.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("console_options") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("console_options") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *LogProcessorsMetricsItems0) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *LogProcessorsMetricsItems0) UnmarshalBinary(b []byte) error { + var res LogProcessorsMetricsItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/metrics_detail_item.go b/pkg/models/metrics_detail_item.go new file mode 100644 index 00000000000..ed13470eece --- /dev/null +++ b/pkg/models/metrics_detail_item.go @@ -0,0 +1,115 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// MetricsDetailItem MetricsDetailItem +// +// swagger:model MetricsDetailItem +type MetricsDetailItem struct { + + // labels of the metric + Labels MetricsLabels `json:"labels,omitempty"` + + // name of the metric + Name string `json:"name,omitempty"` + + // unit of the metric + Unit string `json:"unit,omitempty"` + + // value of the metric + Value float64 `json:"value,omitempty"` +} + +// Validate validates this metrics detail item +func (m *MetricsDetailItem) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateLabels(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *MetricsDetailItem) validateLabels(formats strfmt.Registry) error { + if swag.IsZero(m.Labels) { // not required + return nil + } + + if m.Labels != nil { + if err := m.Labels.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("labels") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("labels") + } + return err + } + } + + return nil +} + +// ContextValidate validate this metrics detail item based on the context it is used +func (m *MetricsDetailItem) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateLabels(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *MetricsDetailItem) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error { + + if swag.IsZero(m.Labels) { // not required + return nil + } + + if err := m.Labels.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("labels") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("labels") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *MetricsDetailItem) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *MetricsDetailItem) UnmarshalBinary(b []byte) error { + var res MetricsDetailItem + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/metrics_labels.go b/pkg/models/metrics_labels.go new file mode 100644 index 00000000000..d807a88bc8d --- /dev/null +++ b/pkg/models/metrics_labels.go @@ -0,0 +1,27 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" +) + +// MetricsLabels MetricsLabels +// +// swagger:model MetricsLabels +type MetricsLabels map[string]string + +// Validate validates this metrics labels +func (m MetricsLabels) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this metrics labels based on context it is used +func (m MetricsLabels) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} diff --git a/pkg/models/metrics_meta.go b/pkg/models/metrics_meta.go new file mode 100644 index 00000000000..9320fb79489 --- /dev/null +++ b/pkg/models/metrics_meta.go @@ -0,0 +1,56 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// MetricsMeta MetricsMeta +// +// swagger:model MetricsMeta +type MetricsMeta struct { + + // UTC timestamp of the current time + UtcNowTimestamp float64 `json:"utc_now_timestamp,omitempty"` + + // UTC timestamp of the startup of the software + UtcStartupTimestamp float64 `json:"utc_startup_timestamp,omitempty"` + + // Size, in seconds, of the window used to compute the metric + WindowSizeSeconds int64 `json:"window_size_seconds,omitempty"` +} + +// Validate validates this metrics meta +func (m *MetricsMeta) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this metrics meta based on context it is used +func (m *MetricsMeta) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *MetricsMeta) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *MetricsMeta) UnmarshalBinary(b []byte) error { + var res MetricsMeta + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/o_sversion.go b/pkg/models/o_sversion.go new file mode 100644 index 00000000000..ae0a85f859e --- /dev/null +++ b/pkg/models/o_sversion.go @@ -0,0 +1,53 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// OSversion OSversion +// +// swagger:model OSversion +type OSversion struct { + + // name of the OS + Name string `json:"name,omitempty"` + + // version of the OS + Version string `json:"version,omitempty"` +} + +// Validate validates this o sversion +func (m *OSversion) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this o sversion based on context it is used +func (m *OSversion) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *OSversion) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *OSversion) UnmarshalBinary(b []byte) error { + var res OSversion + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/remediation_components_metrics.go b/pkg/models/remediation_components_metrics.go new file mode 100644 index 00000000000..5506558c13a --- /dev/null +++ b/pkg/models/remediation_components_metrics.go @@ -0,0 +1,188 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// RemediationComponentsMetrics RemediationComponentsMetrics +// +// swagger:model RemediationComponentsMetrics +type RemediationComponentsMetrics []*RemediationComponentsMetricsItems0 + +// Validate validates this remediation components metrics +func (m RemediationComponentsMetrics) Validate(formats strfmt.Registry) error { + var res []error + + iRemediationComponentsMetricsSize := int64(len(m)) + + if err := validate.MaxItems("", "body", iRemediationComponentsMetricsSize, 1); err != nil { + return err + } + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this remediation components metrics based on the context it is used +func (m RemediationComponentsMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +// RemediationComponentsMetricsItems0 remediation components metrics items0 +// +// swagger:model RemediationComponentsMetricsItems0 +type RemediationComponentsMetricsItems0 struct { + BaseMetrics + + // type of the remediation component + Type string `json:"type,omitempty"` +} + +// UnmarshalJSON unmarshals this object from a JSON structure +func (m *RemediationComponentsMetricsItems0) UnmarshalJSON(raw []byte) error { + // AO0 + var aO0 BaseMetrics + if err := swag.ReadJSON(raw, &aO0); err != nil { + return err + } + m.BaseMetrics = aO0 + + // AO1 + var dataAO1 struct { + Type string `json:"type,omitempty"` + } + if err := swag.ReadJSON(raw, &dataAO1); err != nil { + return err + } + + m.Type = dataAO1.Type + + return nil +} + +// MarshalJSON marshals this object to a JSON structure +func (m RemediationComponentsMetricsItems0) MarshalJSON() ([]byte, error) { + _parts := make([][]byte, 0, 2) + + aO0, err := swag.WriteJSON(m.BaseMetrics) + if err != nil { + return nil, err + } + _parts = append(_parts, aO0) + var dataAO1 struct { + Type string `json:"type,omitempty"` + } + + dataAO1.Type = m.Type + + jsonDataAO1, errAO1 := swag.WriteJSON(dataAO1) + if errAO1 != nil { + return nil, errAO1 + } + _parts = append(_parts, jsonDataAO1) + return swag.ConcatJSON(_parts...), nil +} + +// Validate validates this remediation components metrics items0 +func (m *RemediationComponentsMetricsItems0) Validate(formats strfmt.Registry) error { + var res []error + + // validation for a type composition with BaseMetrics + if err := m.BaseMetrics.Validate(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +// ContextValidate validate this remediation components metrics items0 based on the context it is used +func (m *RemediationComponentsMetricsItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + // validation for a type composition with BaseMetrics + if err := m.BaseMetrics.ContextValidate(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *RemediationComponentsMetricsItems0) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RemediationComponentsMetricsItems0) UnmarshalBinary(b []byte) error { + var res RemediationComponentsMetricsItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/success_response.go b/pkg/models/success_response.go new file mode 100644 index 00000000000..e8fc281c090 --- /dev/null +++ b/pkg/models/success_response.go @@ -0,0 +1,73 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// SuccessResponse success response +// +// success response return by the API +// +// swagger:model SuccessResponse +type SuccessResponse struct { + + // message + // Required: true + Message *string `json:"message"` +} + +// Validate validates this success response +func (m *SuccessResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMessage(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *SuccessResponse) validateMessage(formats strfmt.Registry) error { + + if err := validate.Required("message", "body", m.Message); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this success response based on context it is used +func (m *SuccessResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *SuccessResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SuccessResponse) UnmarshalBinary(b []byte) error { + var res SuccessResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} From c325c2765d0bc89ea148660f12846dd1c624c48d Mon Sep 17 00:00:00 2001 From: marco Date: Tue, 12 Mar 2024 15:29:59 +0100 Subject: [PATCH 003/119] (wip) lp metrics --- cmd/crowdsec/crowdsec.go | 17 + cmd/crowdsec/lpmetrics.go | 183 +++ pkg/apiclient/client.go | 3 + pkg/apiclient/usagemetrics.go | 29 + pkg/apiserver/controllers/controller.go | 17 + pkg/apiserver/controllers/v1/usagemetrics.go | 141 ++ 
pkg/csconfig/crowdsec_service.go | 24 + pkg/csconfig/crowdsec_service_test.go | 3 + pkg/csconfig/database.go | 10 +- pkg/database/bouncers.go | 23 + pkg/database/ent/alert.go | 22 +- pkg/database/ent/alert/alert.go | 2 - pkg/database/ent/alert/where.go | 20 - pkg/database/ent/alert_create.go | 10 +- pkg/database/ent/alert_update.go | 66 +- pkg/database/ent/bouncer.go | 59 +- pkg/database/ent/bouncer/bouncer.go | 26 +- pkg/database/ent/bouncer/where.go | 260 +++- pkg/database/ent/bouncer_create.go | 64 +- pkg/database/ent/bouncer_update.go | 208 ++- pkg/database/ent/client.go | 152 +- pkg/database/ent/configitem.go | 22 +- pkg/database/ent/configitem/configitem.go | 2 - pkg/database/ent/configitem/where.go | 20 - pkg/database/ent/configitem_create.go | 10 +- pkg/database/ent/configitem_update.go | 66 +- pkg/database/ent/decision.go | 22 +- pkg/database/ent/decision/decision.go | 2 - pkg/database/ent/decision/where.go | 20 - pkg/database/ent/decision_create.go | 10 +- pkg/database/ent/decision_update.go | 66 +- pkg/database/ent/ent.go | 2 + pkg/database/ent/event.go | 22 +- pkg/database/ent/event/event.go | 2 - pkg/database/ent/event/where.go | 20 - pkg/database/ent/event_create.go | 10 +- pkg/database/ent/event_update.go | 66 +- pkg/database/ent/hook/hook.go | 12 + pkg/database/ent/lock_update.go | 34 - pkg/database/ent/machine.go | 74 +- pkg/database/ent/machine/machine.go | 29 +- pkg/database/ent/machine/where.go | 270 +++- pkg/database/ent/machine_create.go | 75 +- pkg/database/ent/machine_update.go | 293 ++-- pkg/database/ent/meta.go | 22 +- pkg/database/ent/meta/meta.go | 2 - pkg/database/ent/meta/where.go | 20 - pkg/database/ent/meta_create.go | 10 +- pkg/database/ent/meta_update.go | 52 +- pkg/database/ent/metric.go | 154 ++ pkg/database/ent/metric/metric.go | 104 ++ pkg/database/ent/metric/where.go | 330 ++++ pkg/database/ent/metric_create.go | 246 +++ pkg/database/ent/metric_delete.go | 88 ++ pkg/database/ent/metric_query.go | 526 +++++++ 
pkg/database/ent/metric_update.go | 228 +++ pkg/database/ent/migrate/schema.go | 58 +- pkg/database/ent/mutation.go | 1427 ++++++++++++++---- pkg/database/ent/predicate/predicate.go | 3 + pkg/database/ent/runtime.go | 14 - pkg/database/ent/schema/alert.go | 4 +- pkg/database/ent/schema/bouncer.go | 7 +- pkg/database/ent/schema/config.go | 7 +- pkg/database/ent/schema/decision.go | 4 +- pkg/database/ent/schema/event.go | 4 +- pkg/database/ent/schema/lock.go | 2 +- pkg/database/ent/schema/machine.go | 23 +- pkg/database/ent/schema/meta.go | 5 +- pkg/database/ent/schema/metric.go | 53 + pkg/database/ent/tx.go | 3 + pkg/database/flush.go | 2 + pkg/database/machines.go | 25 + pkg/database/metrics.go | 35 + pkg/models/hub_item.go | 53 + pkg/models/hub_items.go | 67 + pkg/models/localapi_swagger.yaml | 21 +- pkg/models/log_processors_metrics.go | 57 + pkg/models/metrics_meta.go | 4 +- test/bats/11_bouncers_tls.bats | 2 +- test/bats/30_machines.bats | 37 + test/bats/97_ipv4_single.bats | 2 +- test/bats/97_ipv6_single.bats | 2 +- test/bats/98_ipv4_range.bats | 2 +- test/bats/98_ipv6_range.bats | 2 +- test/bats/99_lapi-stream-mode-scenario.bats | 2 +- test/bats/99_lapi-stream-mode-scopes.bats | 2 +- test/bats/99_lapi-stream-mode.bats | 2 +- test/lib/setup_file.sh | 16 + 88 files changed, 5103 insertions(+), 1114 deletions(-) create mode 100644 cmd/crowdsec/lpmetrics.go create mode 100644 pkg/apiclient/usagemetrics.go create mode 100644 pkg/apiserver/controllers/v1/usagemetrics.go create mode 100644 pkg/database/ent/metric.go create mode 100644 pkg/database/ent/metric/metric.go create mode 100644 pkg/database/ent/metric/where.go create mode 100644 pkg/database/ent/metric_create.go create mode 100644 pkg/database/ent/metric_delete.go create mode 100644 pkg/database/ent/metric_query.go create mode 100644 pkg/database/ent/metric_update.go create mode 100644 pkg/database/ent/schema/metric.go create mode 100644 pkg/database/metrics.go create mode 100644 pkg/models/hub_item.go 
create mode 100644 pkg/models/hub_items.go diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index f604af1dedd..3e7c0b43737 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -9,6 +9,7 @@ import ( "time" log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" "gopkg.in/yaml.v2" "github.com/crowdsecurity/go-cs-lib/trace" @@ -146,6 +147,22 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H }) outputWg.Wait() + mp := NewMetricsProvider( + apiClient, + *cConfig.Crowdsec.MetricsInterval, + log.WithField("service", "lpmetrics"), + cConfig.API.Server.ConsoleConfig.EnabledOptions(), + datasources, + hub, + ) + + lpMetricsTomb := tomb.Tomb{} + + lpMetricsTomb.Go(func() error { + // XXX: context? + return mp.Run(context.Background(), &lpMetricsTomb) + }) + if cConfig.Prometheus != nil && cConfig.Prometheus.Enabled { aggregated := false if cConfig.Prometheus.Level == configuration.CFG_METRICS_AGGREGATE { diff --git a/cmd/crowdsec/lpmetrics.go b/cmd/crowdsec/lpmetrics.go new file mode 100644 index 00000000000..2d43a4a4462 --- /dev/null +++ b/cmd/crowdsec/lpmetrics.go @@ -0,0 +1,183 @@ +package main + +import ( + "context" + "errors" + "net/http" + "github.com/sirupsen/logrus" + "github.com/blackfireio/osinfo" + "time" + + "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/crowdsecurity/go-cs-lib/trace" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition" + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/fflag" + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +// MetricsProvider collects metrics from the LP and sends them to the LAPI +type MetricsProvider struct { + apic *apiclient.ApiClient + interval time.Duration + static staticMetrics + logger *logrus.Entry +} + +type staticMetrics struct { + osName string + osVersion string + 
startupTS int64 + featureFlags []string + consoleOptions []string + datasourceMap map[string]int64 + hubState models.HubItems +} + +func getHubState(hub *cwhub.Hub) models.HubItems { + ret := models.HubItems{} + + for _, itemType := range cwhub.ItemTypes { + items, _ := hub.GetInstalledItems(itemType) + for _, item := range items { + status := "official" + if item.State.IsLocal() { + status = "custom" + } + if item.State.Tainted { + status = "tainted" + } + ret[item.FQName()] = models.HubItem{ + Version: item.Version, + Status: status, + } + } + } + + return ret +} + +// newStaticMetrics is called when the process starts, or reloads the configuration +func newStaticMetrics(consoleOptions []string, datasources []acquisition.DataSource, hub *cwhub.Hub) staticMetrics { + datasourceMap := map[string]int64{} + + for _, ds := range datasources { + datasourceMap[ds.GetName()] += 1 + } + + osName, osVersion := detectOS() + + return staticMetrics{ + osName: osName, + osVersion: osVersion, + startupTS: time.Now().Unix(), + featureFlags: fflag.Crowdsec.GetEnabledFeatures(), + consoleOptions: consoleOptions, + datasourceMap: datasourceMap, + hubState: getHubState(hub), + } +} + +func detectOS() (string, string) { + if cwversion.System == "docker" { + return "docker", "" + } + + osInfo, err := osinfo.GetOSInfo() + if err != nil { + return cwversion.System, "???" 
+ } + + return osInfo.Name, osInfo.Version +} + + +func NewMetricsProvider(apic *apiclient.ApiClient, interval time.Duration, logger *logrus.Entry, + consoleOptions []string, datasources []acquisition.DataSource, hub *cwhub.Hub) *MetricsProvider { + return &MetricsProvider{ + apic: apic, + interval: interval, + logger: logger, + static: newStaticMetrics(consoleOptions, datasources, hub), + } +} + +func (m *MetricsProvider) metricsPayload() *models.AllMetrics { + meta := &models.MetricsMeta{ + UtcStartupTimestamp: m.static.startupTS, + WindowSizeSeconds: int64(m.interval.Seconds()), + } + + os := &models.OSversion{ + Name: m.static.osName, + Version: m.static.osVersion, + } + + base := models.BaseMetrics{ + Meta: meta, + Os: os, + Version: ptr.Of(cwversion.VersionStr()), + FeatureFlags: m.static.featureFlags, + } + + item0 := &models.LogProcessorsMetricsItems0{ + BaseMetrics: base, + ConsoleOptions: m.static.consoleOptions, + Datasources: m.static.datasourceMap, + HubItems: m.static.hubState, + } + + // TODO: more metric details... ? 
+ + return &models.AllMetrics{ + LogProcessors: []models.LogProcessorsMetrics{{item0}}, + } +} + + +func (m *MetricsProvider) Run(ctx context.Context, myTomb *tomb.Tomb) error { + defer trace.CatchPanic("crowdsec/MetricsProvider.Run") + + if m.interval == time.Duration(0) { + return nil + } + + met := m.metricsPayload() + + ticker := time.NewTicker(m.interval) + + for { + select { + case <-ticker.C: + met.LogProcessors[0][0].Meta.UtcNowTimestamp = time.Now().Unix() + + ctxTime, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + _, resp, err := m.apic.UsageMetrics.Add(ctxTime, met) + switch { + case errors.Is(err, context.DeadlineExceeded): + m.logger.Warnf("timeout sending lp metrics") + continue + case err != nil: + m.logger.Warnf("failed to send lp metrics: %s", err) + continue + } + + if resp.Response.StatusCode != http.StatusCreated { + m.logger.Warnf("failed to send lp metrics: %s", resp.Response.Status) + continue + } + + m.logger.Tracef("lp usage metrics sent") + case <-myTomb.Dying(): + ticker.Stop() + return nil + } + } +} diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index e0e521d6a6f..1cb78ef78ed 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -39,6 +39,7 @@ type ApiClient struct { Metrics *MetricsService Signal *SignalService HeartBeat *HeartBeatService + UsageMetrics *UsageMetricsService } func (a *ApiClient) GetClient() *http.Client { @@ -101,6 +102,7 @@ func NewClient(config *Config) (*ApiClient, error) { c.Signal = (*SignalService)(&c.common) c.DecisionDelete = (*DecisionDeleteService)(&c.common) c.HeartBeat = (*HeartBeatService)(&c.common) + c.UsageMetrics = (*UsageMetricsService)(&c.common) return c, nil } @@ -137,6 +139,7 @@ func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *htt c.Signal = (*SignalService)(&c.common) c.DecisionDelete = (*DecisionDeleteService)(&c.common) c.HeartBeat = (*HeartBeatService)(&c.common) + c.UsageMetrics = 
(*UsageMetricsService)(&c.common) return c, nil } diff --git a/pkg/apiclient/usagemetrics.go b/pkg/apiclient/usagemetrics.go new file mode 100644 index 00000000000..6649913a459 --- /dev/null +++ b/pkg/apiclient/usagemetrics.go @@ -0,0 +1,29 @@ +package apiclient + +import ( + "context" + "fmt" + "net/http" + + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +type UsageMetricsService service + +func (s *UsageMetricsService) Add(ctx context.Context, metrics *models.AllMetrics) (interface{}, *Response, error) { + u := fmt.Sprintf("%s/usage-metrics/", s.client.URLPrefix) + + req, err := s.client.NewRequest(http.MethodPost, u, &metrics) + if err != nil { + return nil, nil, err + } + + var response interface{} + + resp, err := s.client.Do(ctx, req, &response) + if err != nil { + return nil, resp, err + } + + return &response, resp, nil +} diff --git a/pkg/apiserver/controllers/controller.go b/pkg/apiserver/controllers/controller.go index bab1965123e..1ee61de0bd1 100644 --- a/pkg/apiserver/controllers/controller.go +++ b/pkg/apiserver/controllers/controller.go @@ -59,6 +59,17 @@ func serveHealth() http.HandlerFunc { return health.NewHandler(checker) } +func eitherAuthMiddleware(jwtMiddleware gin.HandlerFunc, apiKeyMiddleware gin.HandlerFunc) gin.HandlerFunc { + return func(c *gin.Context) { + // XXX: what when there's no api key for a RC? 
+ if c.GetHeader("X-Api-Key") != "" { + apiKeyMiddleware(c) + } else { + jwtMiddleware(c) + } + } +} + func (c *Controller) NewV1() error { var err error @@ -117,6 +128,12 @@ func (c *Controller) NewV1() error { apiKeyAuth.HEAD("/decisions/stream", c.HandlerV1.StreamDecision) } + eitherAuth := groupV1.Group("") + eitherAuth.Use(eitherAuthMiddleware(c.HandlerV1.Middlewares.JWT.Middleware.MiddlewareFunc(), c.HandlerV1.Middlewares.APIKey.MiddlewareFunc())) + { + eitherAuth.POST("/usage-metrics", c.HandlerV1.UsageMetrics) + } + return nil } diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go new file mode 100644 index 00000000000..8fda148cee4 --- /dev/null +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -0,0 +1,141 @@ +package v1 + +import ( + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/gin-gonic/gin" + "github.com/go-openapi/strfmt" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" +) + + +// updateBaseMetrics updates the base metrics for a machine or bouncer +func (c *Controller) updateBaseMetrics(machineID string, bouncer *ent.Bouncer, baseMetrics *models.BaseMetrics, hubItems *models.HubItems) error { + switch { + case machineID != "": + c.DBClient.MachineUpdateBaseMetrics(machineID, baseMetrics, hubItems) + case bouncer != nil: + c.DBClient.BouncerUpdateBaseMetrics(bouncer.Name, bouncer.Type, baseMetrics) + default: + return fmt.Errorf("no machineID or bouncerName set") + } + + return nil +} + + +// UsageMetrics receives metrics from log processors and remediation components +func (c *Controller) UsageMetrics(gctx *gin.Context) { + var input models.AllMetrics + + // parse the payload + + if err := gctx.ShouldBindJSON(&input); err != nil { + log.Errorf("Failed to bind json: %s", err) + gctx.JSON(http.StatusBadRequest, gin.H{"message": 
err.Error()}) + return + } + + if err := input.Validate(strfmt.Default); err != nil { + log.Errorf("Failed to validate usage metrics: %s", err) + c.HandleDBErrors(gctx, err) + return + } + + // TODO: validate payload with the right type, depending on auth context + + var ( + generatedType metric.GeneratedType + generatedBy string + collectedAt time.Time + ) + + bouncer, _ := getBouncerFromContext(gctx) + if bouncer != nil { + log.Tracef("Received usage metris for bouncer: %s", bouncer.Name) + generatedType = metric.GeneratedTypeRC + generatedBy = bouncer.Name + } + + machineID, _ := getMachineIDFromContext(gctx) + if machineID != "" { + log.Tracef("Received usage metrics for log processor: %s", machineID) + generatedType = metric.GeneratedTypeLP + generatedBy = machineID + } + + // TODO: if both or none are set, which error should we return? + + var ( + payload map[string]any + baseMetrics models.BaseMetrics + hubItems models.HubItems + ) + + switch len(input.LogProcessors) { + case 0: + break + case 1: + // the final slice can't have more than one item, + // guaranteed by the swagger schema + item0 := input.LogProcessors[0][0] + payload = map[string]any{ + "console_options": item0.ConsoleOptions, + "datasources": item0.Datasources, + } + baseMetrics = item0.BaseMetrics + hubItems = item0.HubItems + default: + log.Errorf("Payload has more than one log processor") + // this is not checked in the swagger schema + gctx.JSON(http.StatusBadRequest, gin.H{"message": "Payload has more than one log processor"}) + return + } + + switch len(input.RemediationComponents) { + case 0: + break + case 1: + item0 := input.RemediationComponents[0][0] + payload = map[string]any{ + "type": item0.Type, + // TODO: RC stuff like traffic stats + } + baseMetrics = item0.BaseMetrics + default: + gctx.JSON(http.StatusBadRequest, gin.H{"message": "Payload has more than one remediation component"}) + return + } + + err := c.updateBaseMetrics(machineID, bouncer, &baseMetrics, &hubItems) + if 
err != nil { + log.Errorf("Failed to update base metrics: %s", err) + c.HandleDBErrors(gctx, err) + return + } + + collectedAt = time.Unix(baseMetrics.Meta.UtcNowTimestamp, 0).UTC() + + jsonPayload, err := json.Marshal(payload) + if err != nil { + log.Errorf("Failed to marshal usage metrics: %s", err) + c.HandleDBErrors(gctx, err) + return + } + + if _, err := c.DBClient.CreateMetric(generatedType, generatedBy, collectedAt, string(jsonPayload)); err != nil { + log.Errorf("Failed to store usage metrics: %s", err) + c.HandleDBErrors(gctx, err) + return + } + + // empty body + gctx.Status(http.StatusCreated) +} diff --git a/pkg/csconfig/crowdsec_service.go b/pkg/csconfig/crowdsec_service.go index 7820595b46f..f4103293f0d 100644 --- a/pkg/csconfig/crowdsec_service.go +++ b/pkg/csconfig/crowdsec_service.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "path/filepath" + "time" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" @@ -26,6 +27,7 @@ type CrowdsecServiceCfg struct { BucketStateFile string `yaml:"state_input_file,omitempty"` // if we need to unserialize buckets at start BucketStateDumpDir string `yaml:"state_output_dir,omitempty"` // if we need to unserialize buckets on shutdown BucketsGCEnabled bool `yaml:"-"` // we need to garbage collect buckets when in forensic mode + MetricsInterval *time.Duration `yaml:"metrics_interval,omitempty"` SimulationFilePath string `yaml:"-"` ContextToSend map[string][]string `yaml:"-"` @@ -132,6 +134,8 @@ func (c *Config) LoadCrowdsec() error { c.Crowdsec.AcquisitionFiles[i] = f } + c.Crowdsec.setMetricsInterval() + if err = c.LoadAPIClient(); err != nil { return fmt.Errorf("loading api client: %w", err) } @@ -139,6 +143,26 @@ func (c *Config) LoadCrowdsec() error { return nil } +const ( + defaultMetricsInterval = 30 * time.Second + minimumMetricsInterval = 15 * time.Second +) + +func (c *CrowdsecServiceCfg) setMetricsInterval() { + switch { + case c.MetricsInterval == nil: + c.MetricsInterval = ptr.Of(defaultMetricsInterval) + 
log.Tracef("metrics_interval is not set, default to %s", defaultMetricsInterval) + case *c.MetricsInterval == time.Duration(0): + log.Info("metrics_interval is set to 0, disabling metrics") + case *c.MetricsInterval < minimumMetricsInterval: + c.MetricsInterval = ptr.Of(minimumMetricsInterval) + log.Warnf("metrics_interval is too low, setting it to %s", minimumMetricsInterval) + default: + log.Tracef("metrics_interval set to %s", c.MetricsInterval) + } +} + func (c *CrowdsecServiceCfg) DumpContextConfigFile() error { // XXX: MakeDirs out, err := yaml.Marshal(c.ContextToSend) diff --git a/pkg/csconfig/crowdsec_service_test.go b/pkg/csconfig/crowdsec_service_test.go index 8d332271b03..454681e8cce 100644 --- a/pkg/csconfig/crowdsec_service_test.go +++ b/pkg/csconfig/crowdsec_service_test.go @@ -58,6 +58,7 @@ func TestLoadCrowdsec(t *testing.T) { ParserRoutinesCount: 1, OutputRoutinesCount: 1, ConsoleContextValueLength: 2500, + MetricsInterval: ptr.Of(defaultMetricsInterval), AcquisitionFiles: []string{acquisFullPath}, SimulationFilePath: "./testdata/simulation.yaml", // context is loaded in pkg/alertcontext @@ -98,6 +99,7 @@ func TestLoadCrowdsec(t *testing.T) { ParserRoutinesCount: 1, OutputRoutinesCount: 1, ConsoleContextValueLength: 0, + MetricsInterval: ptr.Of(defaultMetricsInterval), AcquisitionFiles: []string{acquisFullPath, acquisInDirFullPath}, // context is loaded in pkg/alertcontext // ContextToSend: map[string][]string{ @@ -136,6 +138,7 @@ func TestLoadCrowdsec(t *testing.T) { ParserRoutinesCount: 1, OutputRoutinesCount: 1, ConsoleContextValueLength: 10, + MetricsInterval: ptr.Of(defaultMetricsInterval), AcquisitionFiles: []string{}, SimulationFilePath: "", // context is loaded in pkg/alertcontext diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go index a7bc57eefdc..0af9792fa2e 100644 --- a/pkg/csconfig/database.go +++ b/pkg/csconfig/database.go @@ -39,6 +39,7 @@ type DatabaseCfg struct { } type AuthGCCfg struct { + // XXX: define these as 
custom type (with days etc.) ? Cert *string `yaml:"cert,omitempty"` CertDuration *time.Duration Api *string `yaml:"api_key,omitempty"` @@ -48,11 +49,12 @@ type AuthGCCfg struct { } type FlushDBCfg struct { - MaxItems *int `yaml:"max_items,omitempty"` + MaxItems *int `yaml:"max_items,omitempty"` // We could unmarshal as time.Duration, but alert filters right now are a map of strings - MaxAge *string `yaml:"max_age,omitempty"` - BouncersGC *AuthGCCfg `yaml:"bouncers_autodelete,omitempty"` - AgentsGC *AuthGCCfg `yaml:"agents_autodelete,omitempty"` + MaxAge *string `yaml:"max_age,omitempty"` + BouncersGC *AuthGCCfg `yaml:"bouncers_autodelete,omitempty"` + AgentsGC *AuthGCCfg `yaml:"agents_autodelete,omitempty"` + MetricsMaxAge *time.Duration `yaml:"metrics_max_age,omitempty"` } func (c *Config) LoadDBConfig(inCli bool) error { diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go index 2cc6b9dcb47..c0a90914c75 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -2,14 +2,37 @@ package database import ( "fmt" + "strings" "time" "github.com/pkg/errors" + "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" ) +func (c *Client) BouncerUpdateBaseMetrics(bouncerName string, bouncerType string, baseMetrics *models.BaseMetrics) error { + os := baseMetrics.Os + features := strings.Join(baseMetrics.FeatureFlags, ",") + + // XXX: bouncers have no heartbeat, they have "last pull", are we updating it? + + _, err := c.Ent.Bouncer. + Update(). + Where(bouncer.NameEQ(bouncerName)). + SetNillableVersion(baseMetrics.Version). + SetOsname(os.Name). + SetOsversion(os.Version). + SetFeatureflags(features). + SetType(bouncerType). 
+ Save(c.CTX) + if err != nil { + return fmt.Errorf("unable to update base bouncer metrics in database: %s", err) + } + return nil +} + func (c *Client) SelectBouncer(apiKeyHash string) (*ent.Bouncer, error) { result, err := c.Ent.Bouncer.Query().Where(bouncer.APIKeyEQ(apiKeyHash)).First(c.CTX) if err != nil { diff --git a/pkg/database/ent/alert.go b/pkg/database/ent/alert.go index 5cb4d1a352c..6da9f0efe76 100644 --- a/pkg/database/ent/alert.go +++ b/pkg/database/ent/alert.go @@ -19,9 +19,9 @@ type Alert struct { // ID of the ent. ID int `json:"id,omitempty"` // CreatedAt holds the value of the "created_at" field. - CreatedAt *time.Time `json:"created_at,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt *time.Time `json:"updated_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` // Scenario holds the value of the "scenario" field. Scenario string `json:"scenario,omitempty"` // BucketId holds the value of the "bucketId" field. 
@@ -168,15 +168,13 @@ func (a *Alert) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - a.CreatedAt = new(time.Time) - *a.CreatedAt = value.Time + a.CreatedAt = value.Time } case alert.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - a.UpdatedAt = new(time.Time) - *a.UpdatedAt = value.Time + a.UpdatedAt = value.Time } case alert.FieldScenario: if value, ok := values[i].(*sql.NullString); !ok { @@ -367,15 +365,11 @@ func (a *Alert) String() string { var builder strings.Builder builder.WriteString("Alert(") builder.WriteString(fmt.Sprintf("id=%v, ", a.ID)) - if v := a.CreatedAt; v != nil { - builder.WriteString("created_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("created_at=") + builder.WriteString(a.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := a.UpdatedAt; v != nil { - builder.WriteString("updated_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("updated_at=") + builder.WriteString(a.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("scenario=") builder.WriteString(a.Scenario) diff --git a/pkg/database/ent/alert/alert.go b/pkg/database/ent/alert/alert.go index eb9f1d10788..16e0b019e14 100644 --- a/pkg/database/ent/alert/alert.go +++ b/pkg/database/ent/alert/alert.go @@ -152,8 +152,6 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time - // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. - UpdateDefaultCreatedAt func() time.Time // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. 
DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. diff --git a/pkg/database/ent/alert/where.go b/pkg/database/ent/alert/where.go index 516ead50636..c109b78704b 100644 --- a/pkg/database/ent/alert/where.go +++ b/pkg/database/ent/alert/where.go @@ -210,16 +210,6 @@ func CreatedAtLTE(v time.Time) predicate.Alert { return predicate.Alert(sql.FieldLTE(FieldCreatedAt, v)) } -// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. -func CreatedAtIsNil() predicate.Alert { - return predicate.Alert(sql.FieldIsNull(FieldCreatedAt)) -} - -// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. -func CreatedAtNotNil() predicate.Alert { - return predicate.Alert(sql.FieldNotNull(FieldCreatedAt)) -} - // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Alert { return predicate.Alert(sql.FieldEQ(FieldUpdatedAt, v)) @@ -260,16 +250,6 @@ func UpdatedAtLTE(v time.Time) predicate.Alert { return predicate.Alert(sql.FieldLTE(FieldUpdatedAt, v)) } -// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. -func UpdatedAtIsNil() predicate.Alert { - return predicate.Alert(sql.FieldIsNull(FieldUpdatedAt)) -} - -// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. -func UpdatedAtNotNil() predicate.Alert { - return predicate.Alert(sql.FieldNotNull(FieldUpdatedAt)) -} - // ScenarioEQ applies the EQ predicate on the "scenario" field. func ScenarioEQ(v string) predicate.Alert { return predicate.Alert(sql.FieldEQ(FieldScenario, v)) diff --git a/pkg/database/ent/alert_create.go b/pkg/database/ent/alert_create.go index c7498442c06..45a6e40b64f 100644 --- a/pkg/database/ent/alert_create.go +++ b/pkg/database/ent/alert_create.go @@ -473,6 +473,12 @@ func (ac *AlertCreate) defaults() { // check runs all checks and user-defined validators on the builder. 
func (ac *AlertCreate) check() error { + if _, ok := ac.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Alert.created_at"`)} + } + if _, ok := ac.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Alert.updated_at"`)} + } if _, ok := ac.mutation.Scenario(); !ok { return &ValidationError{Name: "scenario", err: errors.New(`ent: missing required field "Alert.scenario"`)} } @@ -507,11 +513,11 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) { ) if value, ok := ac.mutation.CreatedAt(); ok { _spec.SetField(alert.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = &value + _node.CreatedAt = value } if value, ok := ac.mutation.UpdatedAt(); ok { _spec.SetField(alert.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = &value + _node.UpdatedAt = value } if value, ok := ac.mutation.Scenario(); ok { _spec.SetField(alert.FieldScenario, field.TypeString, value) diff --git a/pkg/database/ent/alert_update.go b/pkg/database/ent/alert_update.go index f8a4d108527..8b88c35c7d7 100644 --- a/pkg/database/ent/alert_update.go +++ b/pkg/database/ent/alert_update.go @@ -32,30 +32,12 @@ func (au *AlertUpdate) Where(ps ...predicate.Alert) *AlertUpdate { return au } -// SetCreatedAt sets the "created_at" field. -func (au *AlertUpdate) SetCreatedAt(t time.Time) *AlertUpdate { - au.mutation.SetCreatedAt(t) - return au -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (au *AlertUpdate) ClearCreatedAt() *AlertUpdate { - au.mutation.ClearCreatedAt() - return au -} - // SetUpdatedAt sets the "updated_at" field. func (au *AlertUpdate) SetUpdatedAt(t time.Time) *AlertUpdate { au.mutation.SetUpdatedAt(t) return au } -// ClearUpdatedAt clears the value of the "updated_at" field. 
-func (au *AlertUpdate) ClearUpdatedAt() *AlertUpdate { - au.mutation.ClearUpdatedAt() - return au -} - // SetScenario sets the "scenario" field. func (au *AlertUpdate) SetScenario(s string) *AlertUpdate { au.mutation.SetScenario(s) @@ -660,11 +642,7 @@ func (au *AlertUpdate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (au *AlertUpdate) defaults() { - if _, ok := au.mutation.CreatedAt(); !ok && !au.mutation.CreatedAtCleared() { - v := alert.UpdateDefaultCreatedAt() - au.mutation.SetCreatedAt(v) - } - if _, ok := au.mutation.UpdatedAt(); !ok && !au.mutation.UpdatedAtCleared() { + if _, ok := au.mutation.UpdatedAt(); !ok { v := alert.UpdateDefaultUpdatedAt() au.mutation.SetUpdatedAt(v) } @@ -679,18 +657,9 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := au.mutation.CreatedAt(); ok { - _spec.SetField(alert.FieldCreatedAt, field.TypeTime, value) - } - if au.mutation.CreatedAtCleared() { - _spec.ClearField(alert.FieldCreatedAt, field.TypeTime) - } if value, ok := au.mutation.UpdatedAt(); ok { _spec.SetField(alert.FieldUpdatedAt, field.TypeTime, value) } - if au.mutation.UpdatedAtCleared() { - _spec.ClearField(alert.FieldUpdatedAt, field.TypeTime) - } if value, ok := au.mutation.Scenario(); ok { _spec.SetField(alert.FieldScenario, field.TypeString, value) } @@ -1007,30 +976,12 @@ type AlertUpdateOne struct { mutation *AlertMutation } -// SetCreatedAt sets the "created_at" field. -func (auo *AlertUpdateOne) SetCreatedAt(t time.Time) *AlertUpdateOne { - auo.mutation.SetCreatedAt(t) - return auo -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (auo *AlertUpdateOne) ClearCreatedAt() *AlertUpdateOne { - auo.mutation.ClearCreatedAt() - return auo -} - // SetUpdatedAt sets the "updated_at" field. 
func (auo *AlertUpdateOne) SetUpdatedAt(t time.Time) *AlertUpdateOne { auo.mutation.SetUpdatedAt(t) return auo } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (auo *AlertUpdateOne) ClearUpdatedAt() *AlertUpdateOne { - auo.mutation.ClearUpdatedAt() - return auo -} - // SetScenario sets the "scenario" field. func (auo *AlertUpdateOne) SetScenario(s string) *AlertUpdateOne { auo.mutation.SetScenario(s) @@ -1648,11 +1599,7 @@ func (auo *AlertUpdateOne) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (auo *AlertUpdateOne) defaults() { - if _, ok := auo.mutation.CreatedAt(); !ok && !auo.mutation.CreatedAtCleared() { - v := alert.UpdateDefaultCreatedAt() - auo.mutation.SetCreatedAt(v) - } - if _, ok := auo.mutation.UpdatedAt(); !ok && !auo.mutation.UpdatedAtCleared() { + if _, ok := auo.mutation.UpdatedAt(); !ok { v := alert.UpdateDefaultUpdatedAt() auo.mutation.SetUpdatedAt(v) } @@ -1684,18 +1631,9 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error } } } - if value, ok := auo.mutation.CreatedAt(); ok { - _spec.SetField(alert.FieldCreatedAt, field.TypeTime, value) - } - if auo.mutation.CreatedAtCleared() { - _spec.ClearField(alert.FieldCreatedAt, field.TypeTime) - } if value, ok := auo.mutation.UpdatedAt(); ok { _spec.SetField(alert.FieldUpdatedAt, field.TypeTime, value) } - if auo.mutation.UpdatedAtCleared() { - _spec.ClearField(alert.FieldUpdatedAt, field.TypeTime) - } if value, ok := auo.mutation.Scenario(); ok { _spec.SetField(alert.FieldScenario, field.TypeString, value) } diff --git a/pkg/database/ent/bouncer.go b/pkg/database/ent/bouncer.go index 203f49a432d..4c19494e8ae 100644 --- a/pkg/database/ent/bouncer.go +++ b/pkg/database/ent/bouncer.go @@ -18,9 +18,9 @@ type Bouncer struct { // ID of the ent. ID int `json:"id,omitempty"` // CreatedAt holds the value of the "created_at" field. 
- CreatedAt *time.Time `json:"created_at"` + CreatedAt time.Time `json:"created_at"` // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt *time.Time `json:"updated_at"` + UpdatedAt time.Time `json:"updated_at"` // Name holds the value of the "name" field. Name string `json:"name"` // APIKey holds the value of the "api_key" field. @@ -38,7 +38,13 @@ type Bouncer struct { // LastPull holds the value of the "last_pull" field. LastPull time.Time `json:"last_pull"` // AuthType holds the value of the "auth_type" field. - AuthType string `json:"auth_type"` + AuthType string `json:"auth_type"` + // Osname holds the value of the "osname" field. + Osname string `json:"osname,omitempty"` + // Osversion holds the value of the "osversion" field. + Osversion string `json:"osversion,omitempty"` + // Featureflags holds the value of the "featureflags" field. + Featureflags string `json:"featureflags,omitempty"` selectValues sql.SelectValues } @@ -51,7 +57,7 @@ func (*Bouncer) scanValues(columns []string) ([]any, error) { values[i] = new(sql.NullBool) case bouncer.FieldID: values[i] = new(sql.NullInt64) - case bouncer.FieldName, bouncer.FieldAPIKey, bouncer.FieldIPAddress, bouncer.FieldType, bouncer.FieldVersion, bouncer.FieldAuthType: + case bouncer.FieldName, bouncer.FieldAPIKey, bouncer.FieldIPAddress, bouncer.FieldType, bouncer.FieldVersion, bouncer.FieldAuthType, bouncer.FieldOsname, bouncer.FieldOsversion, bouncer.FieldFeatureflags: values[i] = new(sql.NullString) case bouncer.FieldCreatedAt, bouncer.FieldUpdatedAt, bouncer.FieldUntil, bouncer.FieldLastPull: values[i] = new(sql.NullTime) @@ -80,15 +86,13 @@ func (b *Bouncer) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - b.CreatedAt = new(time.Time) - *b.CreatedAt = value.Time + b.CreatedAt = value.Time } case bouncer.FieldUpdatedAt: if value, ok := 
values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - b.UpdatedAt = new(time.Time) - *b.UpdatedAt = value.Time + b.UpdatedAt = value.Time } case bouncer.FieldName: if value, ok := values[i].(*sql.NullString); !ok { @@ -144,6 +148,24 @@ func (b *Bouncer) assignValues(columns []string, values []any) error { } else if value.Valid { b.AuthType = value.String } + case bouncer.FieldOsname: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field osname", values[i]) + } else if value.Valid { + b.Osname = value.String + } + case bouncer.FieldOsversion: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field osversion", values[i]) + } else if value.Valid { + b.Osversion = value.String + } + case bouncer.FieldFeatureflags: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field featureflags", values[i]) + } else if value.Valid { + b.Featureflags = value.String + } default: b.selectValues.Set(columns[i], values[i]) } @@ -180,15 +202,11 @@ func (b *Bouncer) String() string { var builder strings.Builder builder.WriteString("Bouncer(") builder.WriteString(fmt.Sprintf("id=%v, ", b.ID)) - if v := b.CreatedAt; v != nil { - builder.WriteString("created_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("created_at=") + builder.WriteString(b.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := b.UpdatedAt; v != nil { - builder.WriteString("updated_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("updated_at=") + builder.WriteString(b.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("name=") builder.WriteString(b.Name) @@ -215,6 +233,15 @@ func (b *Bouncer) String() string { builder.WriteString(", ") builder.WriteString("auth_type=") builder.WriteString(b.AuthType) + 
builder.WriteString(", ") + builder.WriteString("osname=") + builder.WriteString(b.Osname) + builder.WriteString(", ") + builder.WriteString("osversion=") + builder.WriteString(b.Osversion) + builder.WriteString(", ") + builder.WriteString("featureflags=") + builder.WriteString(b.Featureflags) builder.WriteByte(')') return builder.String() } diff --git a/pkg/database/ent/bouncer/bouncer.go b/pkg/database/ent/bouncer/bouncer.go index 24d230d3b54..419f2a209f9 100644 --- a/pkg/database/ent/bouncer/bouncer.go +++ b/pkg/database/ent/bouncer/bouncer.go @@ -35,6 +35,12 @@ const ( FieldLastPull = "last_pull" // FieldAuthType holds the string denoting the auth_type field in the database. FieldAuthType = "auth_type" + // FieldOsname holds the string denoting the osname field in the database. + FieldOsname = "osname" + // FieldOsversion holds the string denoting the osversion field in the database. + FieldOsversion = "osversion" + // FieldFeatureflags holds the string denoting the featureflags field in the database. + FieldFeatureflags = "featureflags" // Table holds the table name of the bouncer in the database. Table = "bouncers" ) @@ -53,6 +59,9 @@ var Columns = []string{ FieldUntil, FieldLastPull, FieldAuthType, + FieldOsname, + FieldOsversion, + FieldFeatureflags, } // ValidColumn reports if the column name is valid (part of the table columns). @@ -68,8 +77,6 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time - // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. - UpdateDefaultCreatedAt func() time.Time // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. 
@@ -146,3 +153,18 @@ func ByLastPull(opts ...sql.OrderTermOption) OrderOption { func ByAuthType(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldAuthType, opts...).ToFunc() } + +// ByOsname orders the results by the osname field. +func ByOsname(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOsname, opts...).ToFunc() +} + +// ByOsversion orders the results by the osversion field. +func ByOsversion(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOsversion, opts...).ToFunc() +} + +// ByFeatureflags orders the results by the featureflags field. +func ByFeatureflags(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFeatureflags, opts...).ToFunc() +} diff --git a/pkg/database/ent/bouncer/where.go b/pkg/database/ent/bouncer/where.go index 5bf721dbf51..1c20eb44b4f 100644 --- a/pkg/database/ent/bouncer/where.go +++ b/pkg/database/ent/bouncer/where.go @@ -109,6 +109,21 @@ func AuthType(v string) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldAuthType, v)) } +// Osname applies equality check predicate on the "osname" field. It's identical to OsnameEQ. +func Osname(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldOsname, v)) +} + +// Osversion applies equality check predicate on the "osversion" field. It's identical to OsversionEQ. +func Osversion(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldOsversion, v)) +} + +// Featureflags applies equality check predicate on the "featureflags" field. It's identical to FeatureflagsEQ. +func Featureflags(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldFeatureflags, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. 
func CreatedAtEQ(v time.Time) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldCreatedAt, v)) @@ -149,16 +164,6 @@ func CreatedAtLTE(v time.Time) predicate.Bouncer { return predicate.Bouncer(sql.FieldLTE(FieldCreatedAt, v)) } -// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. -func CreatedAtIsNil() predicate.Bouncer { - return predicate.Bouncer(sql.FieldIsNull(FieldCreatedAt)) -} - -// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. -func CreatedAtNotNil() predicate.Bouncer { - return predicate.Bouncer(sql.FieldNotNull(FieldCreatedAt)) -} - // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldUpdatedAt, v)) @@ -199,16 +204,6 @@ func UpdatedAtLTE(v time.Time) predicate.Bouncer { return predicate.Bouncer(sql.FieldLTE(FieldUpdatedAt, v)) } -// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. -func UpdatedAtIsNil() predicate.Bouncer { - return predicate.Bouncer(sql.FieldIsNull(FieldUpdatedAt)) -} - -// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. -func UpdatedAtNotNil() predicate.Bouncer { - return predicate.Bouncer(sql.FieldNotNull(FieldUpdatedAt)) -} - // NameEQ applies the EQ predicate on the "name" field. func NameEQ(v string) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldName, v)) @@ -729,6 +724,231 @@ func AuthTypeContainsFold(v string) predicate.Bouncer { return predicate.Bouncer(sql.FieldContainsFold(FieldAuthType, v)) } +// OsnameEQ applies the EQ predicate on the "osname" field. +func OsnameEQ(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldOsname, v)) +} + +// OsnameNEQ applies the NEQ predicate on the "osname" field. +func OsnameNEQ(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldNEQ(FieldOsname, v)) +} + +// OsnameIn applies the In predicate on the "osname" field. 
+func OsnameIn(vs ...string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldIn(FieldOsname, vs...)) +} + +// OsnameNotIn applies the NotIn predicate on the "osname" field. +func OsnameNotIn(vs ...string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldNotIn(FieldOsname, vs...)) +} + +// OsnameGT applies the GT predicate on the "osname" field. +func OsnameGT(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldGT(FieldOsname, v)) +} + +// OsnameGTE applies the GTE predicate on the "osname" field. +func OsnameGTE(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldGTE(FieldOsname, v)) +} + +// OsnameLT applies the LT predicate on the "osname" field. +func OsnameLT(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldLT(FieldOsname, v)) +} + +// OsnameLTE applies the LTE predicate on the "osname" field. +func OsnameLTE(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldLTE(FieldOsname, v)) +} + +// OsnameContains applies the Contains predicate on the "osname" field. +func OsnameContains(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldContains(FieldOsname, v)) +} + +// OsnameHasPrefix applies the HasPrefix predicate on the "osname" field. +func OsnameHasPrefix(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldHasPrefix(FieldOsname, v)) +} + +// OsnameHasSuffix applies the HasSuffix predicate on the "osname" field. +func OsnameHasSuffix(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldHasSuffix(FieldOsname, v)) +} + +// OsnameIsNil applies the IsNil predicate on the "osname" field. +func OsnameIsNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldIsNull(FieldOsname)) +} + +// OsnameNotNil applies the NotNil predicate on the "osname" field. +func OsnameNotNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldNotNull(FieldOsname)) +} + +// OsnameEqualFold applies the EqualFold predicate on the "osname" field. 
+func OsnameEqualFold(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEqualFold(FieldOsname, v)) +} + +// OsnameContainsFold applies the ContainsFold predicate on the "osname" field. +func OsnameContainsFold(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldContainsFold(FieldOsname, v)) +} + +// OsversionEQ applies the EQ predicate on the "osversion" field. +func OsversionEQ(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldOsversion, v)) +} + +// OsversionNEQ applies the NEQ predicate on the "osversion" field. +func OsversionNEQ(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldNEQ(FieldOsversion, v)) +} + +// OsversionIn applies the In predicate on the "osversion" field. +func OsversionIn(vs ...string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldIn(FieldOsversion, vs...)) +} + +// OsversionNotIn applies the NotIn predicate on the "osversion" field. +func OsversionNotIn(vs ...string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldNotIn(FieldOsversion, vs...)) +} + +// OsversionGT applies the GT predicate on the "osversion" field. +func OsversionGT(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldGT(FieldOsversion, v)) +} + +// OsversionGTE applies the GTE predicate on the "osversion" field. +func OsversionGTE(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldGTE(FieldOsversion, v)) +} + +// OsversionLT applies the LT predicate on the "osversion" field. +func OsversionLT(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldLT(FieldOsversion, v)) +} + +// OsversionLTE applies the LTE predicate on the "osversion" field. +func OsversionLTE(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldLTE(FieldOsversion, v)) +} + +// OsversionContains applies the Contains predicate on the "osversion" field. 
+func OsversionContains(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldContains(FieldOsversion, v)) +} + +// OsversionHasPrefix applies the HasPrefix predicate on the "osversion" field. +func OsversionHasPrefix(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldHasPrefix(FieldOsversion, v)) +} + +// OsversionHasSuffix applies the HasSuffix predicate on the "osversion" field. +func OsversionHasSuffix(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldHasSuffix(FieldOsversion, v)) +} + +// OsversionIsNil applies the IsNil predicate on the "osversion" field. +func OsversionIsNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldIsNull(FieldOsversion)) +} + +// OsversionNotNil applies the NotNil predicate on the "osversion" field. +func OsversionNotNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldNotNull(FieldOsversion)) +} + +// OsversionEqualFold applies the EqualFold predicate on the "osversion" field. +func OsversionEqualFold(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEqualFold(FieldOsversion, v)) +} + +// OsversionContainsFold applies the ContainsFold predicate on the "osversion" field. +func OsversionContainsFold(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldContainsFold(FieldOsversion, v)) +} + +// FeatureflagsEQ applies the EQ predicate on the "featureflags" field. +func FeatureflagsEQ(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldFeatureflags, v)) +} + +// FeatureflagsNEQ applies the NEQ predicate on the "featureflags" field. +func FeatureflagsNEQ(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldNEQ(FieldFeatureflags, v)) +} + +// FeatureflagsIn applies the In predicate on the "featureflags" field. +func FeatureflagsIn(vs ...string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldIn(FieldFeatureflags, vs...)) +} + +// FeatureflagsNotIn applies the NotIn predicate on the "featureflags" field. 
+func FeatureflagsNotIn(vs ...string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldNotIn(FieldFeatureflags, vs...)) +} + +// FeatureflagsGT applies the GT predicate on the "featureflags" field. +func FeatureflagsGT(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldGT(FieldFeatureflags, v)) +} + +// FeatureflagsGTE applies the GTE predicate on the "featureflags" field. +func FeatureflagsGTE(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldGTE(FieldFeatureflags, v)) +} + +// FeatureflagsLT applies the LT predicate on the "featureflags" field. +func FeatureflagsLT(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldLT(FieldFeatureflags, v)) +} + +// FeatureflagsLTE applies the LTE predicate on the "featureflags" field. +func FeatureflagsLTE(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldLTE(FieldFeatureflags, v)) +} + +// FeatureflagsContains applies the Contains predicate on the "featureflags" field. +func FeatureflagsContains(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldContains(FieldFeatureflags, v)) +} + +// FeatureflagsHasPrefix applies the HasPrefix predicate on the "featureflags" field. +func FeatureflagsHasPrefix(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldHasPrefix(FieldFeatureflags, v)) +} + +// FeatureflagsHasSuffix applies the HasSuffix predicate on the "featureflags" field. +func FeatureflagsHasSuffix(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldHasSuffix(FieldFeatureflags, v)) +} + +// FeatureflagsIsNil applies the IsNil predicate on the "featureflags" field. +func FeatureflagsIsNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldIsNull(FieldFeatureflags)) +} + +// FeatureflagsNotNil applies the NotNil predicate on the "featureflags" field. 
+func FeatureflagsNotNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldNotNull(FieldFeatureflags)) +} + +// FeatureflagsEqualFold applies the EqualFold predicate on the "featureflags" field. +func FeatureflagsEqualFold(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEqualFold(FieldFeatureflags, v)) +} + +// FeatureflagsContainsFold applies the ContainsFold predicate on the "featureflags" field. +func FeatureflagsContainsFold(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldContainsFold(FieldFeatureflags, v)) +} + // And groups predicates with the AND operator between them. func And(predicates ...predicate.Bouncer) predicate.Bouncer { return predicate.Bouncer(sql.AndPredicates(predicates...)) diff --git a/pkg/database/ent/bouncer_create.go b/pkg/database/ent/bouncer_create.go index 3d08277dcfb..b63153c48d2 100644 --- a/pkg/database/ent/bouncer_create.go +++ b/pkg/database/ent/bouncer_create.go @@ -150,6 +150,48 @@ func (bc *BouncerCreate) SetNillableAuthType(s *string) *BouncerCreate { return bc } +// SetOsname sets the "osname" field. +func (bc *BouncerCreate) SetOsname(s string) *BouncerCreate { + bc.mutation.SetOsname(s) + return bc +} + +// SetNillableOsname sets the "osname" field if the given value is not nil. +func (bc *BouncerCreate) SetNillableOsname(s *string) *BouncerCreate { + if s != nil { + bc.SetOsname(*s) + } + return bc +} + +// SetOsversion sets the "osversion" field. +func (bc *BouncerCreate) SetOsversion(s string) *BouncerCreate { + bc.mutation.SetOsversion(s) + return bc +} + +// SetNillableOsversion sets the "osversion" field if the given value is not nil. +func (bc *BouncerCreate) SetNillableOsversion(s *string) *BouncerCreate { + if s != nil { + bc.SetOsversion(*s) + } + return bc +} + +// SetFeatureflags sets the "featureflags" field. 
+func (bc *BouncerCreate) SetFeatureflags(s string) *BouncerCreate { + bc.mutation.SetFeatureflags(s) + return bc +} + +// SetNillableFeatureflags sets the "featureflags" field if the given value is not nil. +func (bc *BouncerCreate) SetNillableFeatureflags(s *string) *BouncerCreate { + if s != nil { + bc.SetFeatureflags(*s) + } + return bc +} + // Mutation returns the BouncerMutation object of the builder. func (bc *BouncerCreate) Mutation() *BouncerMutation { return bc.mutation @@ -213,6 +255,12 @@ func (bc *BouncerCreate) defaults() { // check runs all checks and user-defined validators on the builder. func (bc *BouncerCreate) check() error { + if _, ok := bc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Bouncer.created_at"`)} + } + if _, ok := bc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Bouncer.updated_at"`)} + } if _, ok := bc.mutation.Name(); !ok { return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Bouncer.name"`)} } @@ -256,11 +304,11 @@ func (bc *BouncerCreate) createSpec() (*Bouncer, *sqlgraph.CreateSpec) { ) if value, ok := bc.mutation.CreatedAt(); ok { _spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = &value + _node.CreatedAt = value } if value, ok := bc.mutation.UpdatedAt(); ok { _spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = &value + _node.UpdatedAt = value } if value, ok := bc.mutation.Name(); ok { _spec.SetField(bouncer.FieldName, field.TypeString, value) @@ -298,6 +346,18 @@ func (bc *BouncerCreate) createSpec() (*Bouncer, *sqlgraph.CreateSpec) { _spec.SetField(bouncer.FieldAuthType, field.TypeString, value) _node.AuthType = value } + if value, ok := bc.mutation.Osname(); ok { + _spec.SetField(bouncer.FieldOsname, field.TypeString, value) + _node.Osname = value + } + if value, ok := 
bc.mutation.Osversion(); ok { + _spec.SetField(bouncer.FieldOsversion, field.TypeString, value) + _node.Osversion = value + } + if value, ok := bc.mutation.Featureflags(); ok { + _spec.SetField(bouncer.FieldFeatureflags, field.TypeString, value) + _node.Featureflags = value + } return _node, _spec } diff --git a/pkg/database/ent/bouncer_update.go b/pkg/database/ent/bouncer_update.go index b3f5e1a5540..1636a84ce11 100644 --- a/pkg/database/ent/bouncer_update.go +++ b/pkg/database/ent/bouncer_update.go @@ -34,9 +34,11 @@ func (bu *BouncerUpdate) SetCreatedAt(t time.Time) *BouncerUpdate { return bu } -// ClearCreatedAt clears the value of the "created_at" field. -func (bu *BouncerUpdate) ClearCreatedAt() *BouncerUpdate { - bu.mutation.ClearCreatedAt() +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableCreatedAt(t *time.Time) *BouncerUpdate { + if t != nil { + bu.SetCreatedAt(*t) + } return bu } @@ -46,12 +48,6 @@ func (bu *BouncerUpdate) SetUpdatedAt(t time.Time) *BouncerUpdate { return bu } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (bu *BouncerUpdate) ClearUpdatedAt() *BouncerUpdate { - bu.mutation.ClearUpdatedAt() - return bu -} - // SetName sets the "name" field. func (bu *BouncerUpdate) SetName(s string) *BouncerUpdate { bu.mutation.SetName(s) @@ -202,6 +198,66 @@ func (bu *BouncerUpdate) SetNillableAuthType(s *string) *BouncerUpdate { return bu } +// SetOsname sets the "osname" field. +func (bu *BouncerUpdate) SetOsname(s string) *BouncerUpdate { + bu.mutation.SetOsname(s) + return bu +} + +// SetNillableOsname sets the "osname" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableOsname(s *string) *BouncerUpdate { + if s != nil { + bu.SetOsname(*s) + } + return bu +} + +// ClearOsname clears the value of the "osname" field. 
+func (bu *BouncerUpdate) ClearOsname() *BouncerUpdate { + bu.mutation.ClearOsname() + return bu +} + +// SetOsversion sets the "osversion" field. +func (bu *BouncerUpdate) SetOsversion(s string) *BouncerUpdate { + bu.mutation.SetOsversion(s) + return bu +} + +// SetNillableOsversion sets the "osversion" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableOsversion(s *string) *BouncerUpdate { + if s != nil { + bu.SetOsversion(*s) + } + return bu +} + +// ClearOsversion clears the value of the "osversion" field. +func (bu *BouncerUpdate) ClearOsversion() *BouncerUpdate { + bu.mutation.ClearOsversion() + return bu +} + +// SetFeatureflags sets the "featureflags" field. +func (bu *BouncerUpdate) SetFeatureflags(s string) *BouncerUpdate { + bu.mutation.SetFeatureflags(s) + return bu +} + +// SetNillableFeatureflags sets the "featureflags" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableFeatureflags(s *string) *BouncerUpdate { + if s != nil { + bu.SetFeatureflags(*s) + } + return bu +} + +// ClearFeatureflags clears the value of the "featureflags" field. +func (bu *BouncerUpdate) ClearFeatureflags() *BouncerUpdate { + bu.mutation.ClearFeatureflags() + return bu +} + // Mutation returns the BouncerMutation object of the builder. func (bu *BouncerUpdate) Mutation() *BouncerMutation { return bu.mutation @@ -237,11 +293,7 @@ func (bu *BouncerUpdate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. 
func (bu *BouncerUpdate) defaults() { - if _, ok := bu.mutation.CreatedAt(); !ok && !bu.mutation.CreatedAtCleared() { - v := bouncer.UpdateDefaultCreatedAt() - bu.mutation.SetCreatedAt(v) - } - if _, ok := bu.mutation.UpdatedAt(); !ok && !bu.mutation.UpdatedAtCleared() { + if _, ok := bu.mutation.UpdatedAt(); !ok { v := bouncer.UpdateDefaultUpdatedAt() bu.mutation.SetUpdatedAt(v) } @@ -259,15 +311,9 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := bu.mutation.CreatedAt(); ok { _spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value) } - if bu.mutation.CreatedAtCleared() { - _spec.ClearField(bouncer.FieldCreatedAt, field.TypeTime) - } if value, ok := bu.mutation.UpdatedAt(); ok { _spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value) } - if bu.mutation.UpdatedAtCleared() { - _spec.ClearField(bouncer.FieldUpdatedAt, field.TypeTime) - } if value, ok := bu.mutation.Name(); ok { _spec.SetField(bouncer.FieldName, field.TypeString, value) } @@ -307,6 +353,24 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := bu.mutation.AuthType(); ok { _spec.SetField(bouncer.FieldAuthType, field.TypeString, value) } + if value, ok := bu.mutation.Osname(); ok { + _spec.SetField(bouncer.FieldOsname, field.TypeString, value) + } + if bu.mutation.OsnameCleared() { + _spec.ClearField(bouncer.FieldOsname, field.TypeString) + } + if value, ok := bu.mutation.Osversion(); ok { + _spec.SetField(bouncer.FieldOsversion, field.TypeString, value) + } + if bu.mutation.OsversionCleared() { + _spec.ClearField(bouncer.FieldOsversion, field.TypeString) + } + if value, ok := bu.mutation.Featureflags(); ok { + _spec.SetField(bouncer.FieldFeatureflags, field.TypeString, value) + } + if bu.mutation.FeatureflagsCleared() { + _spec.ClearField(bouncer.FieldFeatureflags, field.TypeString) + } if n, err = sqlgraph.UpdateNodes(ctx, bu.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = 
&NotFoundError{bouncer.Label} @@ -333,9 +397,11 @@ func (buo *BouncerUpdateOne) SetCreatedAt(t time.Time) *BouncerUpdateOne { return buo } -// ClearCreatedAt clears the value of the "created_at" field. -func (buo *BouncerUpdateOne) ClearCreatedAt() *BouncerUpdateOne { - buo.mutation.ClearCreatedAt() +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableCreatedAt(t *time.Time) *BouncerUpdateOne { + if t != nil { + buo.SetCreatedAt(*t) + } return buo } @@ -345,12 +411,6 @@ func (buo *BouncerUpdateOne) SetUpdatedAt(t time.Time) *BouncerUpdateOne { return buo } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (buo *BouncerUpdateOne) ClearUpdatedAt() *BouncerUpdateOne { - buo.mutation.ClearUpdatedAt() - return buo -} - // SetName sets the "name" field. func (buo *BouncerUpdateOne) SetName(s string) *BouncerUpdateOne { buo.mutation.SetName(s) @@ -501,6 +561,66 @@ func (buo *BouncerUpdateOne) SetNillableAuthType(s *string) *BouncerUpdateOne { return buo } +// SetOsname sets the "osname" field. +func (buo *BouncerUpdateOne) SetOsname(s string) *BouncerUpdateOne { + buo.mutation.SetOsname(s) + return buo +} + +// SetNillableOsname sets the "osname" field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableOsname(s *string) *BouncerUpdateOne { + if s != nil { + buo.SetOsname(*s) + } + return buo +} + +// ClearOsname clears the value of the "osname" field. +func (buo *BouncerUpdateOne) ClearOsname() *BouncerUpdateOne { + buo.mutation.ClearOsname() + return buo +} + +// SetOsversion sets the "osversion" field. +func (buo *BouncerUpdateOne) SetOsversion(s string) *BouncerUpdateOne { + buo.mutation.SetOsversion(s) + return buo +} + +// SetNillableOsversion sets the "osversion" field if the given value is not nil. 
+func (buo *BouncerUpdateOne) SetNillableOsversion(s *string) *BouncerUpdateOne { + if s != nil { + buo.SetOsversion(*s) + } + return buo +} + +// ClearOsversion clears the value of the "osversion" field. +func (buo *BouncerUpdateOne) ClearOsversion() *BouncerUpdateOne { + buo.mutation.ClearOsversion() + return buo +} + +// SetFeatureflags sets the "featureflags" field. +func (buo *BouncerUpdateOne) SetFeatureflags(s string) *BouncerUpdateOne { + buo.mutation.SetFeatureflags(s) + return buo +} + +// SetNillableFeatureflags sets the "featureflags" field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableFeatureflags(s *string) *BouncerUpdateOne { + if s != nil { + buo.SetFeatureflags(*s) + } + return buo +} + +// ClearFeatureflags clears the value of the "featureflags" field. +func (buo *BouncerUpdateOne) ClearFeatureflags() *BouncerUpdateOne { + buo.mutation.ClearFeatureflags() + return buo +} + // Mutation returns the BouncerMutation object of the builder. func (buo *BouncerUpdateOne) Mutation() *BouncerMutation { return buo.mutation @@ -549,11 +669,7 @@ func (buo *BouncerUpdateOne) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. 
func (buo *BouncerUpdateOne) defaults() { - if _, ok := buo.mutation.CreatedAt(); !ok && !buo.mutation.CreatedAtCleared() { - v := bouncer.UpdateDefaultCreatedAt() - buo.mutation.SetCreatedAt(v) - } - if _, ok := buo.mutation.UpdatedAt(); !ok && !buo.mutation.UpdatedAtCleared() { + if _, ok := buo.mutation.UpdatedAt(); !ok { v := bouncer.UpdateDefaultUpdatedAt() buo.mutation.SetUpdatedAt(v) } @@ -588,15 +704,9 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e if value, ok := buo.mutation.CreatedAt(); ok { _spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value) } - if buo.mutation.CreatedAtCleared() { - _spec.ClearField(bouncer.FieldCreatedAt, field.TypeTime) - } if value, ok := buo.mutation.UpdatedAt(); ok { _spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value) } - if buo.mutation.UpdatedAtCleared() { - _spec.ClearField(bouncer.FieldUpdatedAt, field.TypeTime) - } if value, ok := buo.mutation.Name(); ok { _spec.SetField(bouncer.FieldName, field.TypeString, value) } @@ -636,6 +746,24 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e if value, ok := buo.mutation.AuthType(); ok { _spec.SetField(bouncer.FieldAuthType, field.TypeString, value) } + if value, ok := buo.mutation.Osname(); ok { + _spec.SetField(bouncer.FieldOsname, field.TypeString, value) + } + if buo.mutation.OsnameCleared() { + _spec.ClearField(bouncer.FieldOsname, field.TypeString) + } + if value, ok := buo.mutation.Osversion(); ok { + _spec.SetField(bouncer.FieldOsversion, field.TypeString, value) + } + if buo.mutation.OsversionCleared() { + _spec.ClearField(bouncer.FieldOsversion, field.TypeString) + } + if value, ok := buo.mutation.Featureflags(); ok { + _spec.SetField(bouncer.FieldFeatureflags, field.TypeString, value) + } + if buo.mutation.FeatureflagsCleared() { + _spec.ClearField(bouncer.FieldFeatureflags, field.TypeString) + } _node = &Bouncer{config: buo.config} _spec.Assign = _node.assignValues _spec.ScanValues = 
_node.scanValues diff --git a/pkg/database/ent/client.go b/pkg/database/ent/client.go index 5318109ed42..59686102ebe 100644 --- a/pkg/database/ent/client.go +++ b/pkg/database/ent/client.go @@ -23,6 +23,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" ) // Client is the client that holds all ent builders. @@ -46,6 +47,8 @@ type Client struct { Machine *MachineClient // Meta is the client for interacting with the Meta builders. Meta *MetaClient + // Metric is the client for interacting with the Metric builders. + Metric *MetricClient } // NewClient creates a new client configured with the given options. @@ -65,6 +68,7 @@ func (c *Client) init() { c.Lock = NewLockClient(c.config) c.Machine = NewMachineClient(c.config) c.Meta = NewMetaClient(c.config) + c.Metric = NewMetricClient(c.config) } type ( @@ -165,6 +169,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { Lock: NewLockClient(cfg), Machine: NewMachineClient(cfg), Meta: NewMetaClient(cfg), + Metric: NewMetricClient(cfg), }, nil } @@ -192,6 +197,7 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) Lock: NewLockClient(cfg), Machine: NewMachineClient(cfg), Meta: NewMetaClient(cfg), + Metric: NewMetricClient(cfg), }, nil } @@ -222,7 +228,7 @@ func (c *Client) Close() error { func (c *Client) Use(hooks ...Hook) { for _, n := range []interface{ Use(...Hook) }{ c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Lock, c.Machine, - c.Meta, + c.Meta, c.Metric, } { n.Use(hooks...) 
} @@ -233,7 +239,7 @@ func (c *Client) Use(hooks ...Hook) { func (c *Client) Intercept(interceptors ...Interceptor) { for _, n := range []interface{ Intercept(...Interceptor) }{ c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Lock, c.Machine, - c.Meta, + c.Meta, c.Metric, } { n.Intercept(interceptors...) } @@ -258,6 +264,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { return c.Machine.mutate(ctx, m) case *MetaMutation: return c.Meta.mutate(ctx, m) + case *MetricMutation: + return c.Metric.mutate(ctx, m) default: return nil, fmt.Errorf("ent: unknown mutation type %T", m) } @@ -1455,13 +1463,147 @@ func (c *MetaClient) mutate(ctx context.Context, m *MetaMutation) (Value, error) } } +// MetricClient is a client for the Metric schema. +type MetricClient struct { + config +} + +// NewMetricClient returns a client for the Metric from the given config. +func NewMetricClient(c config) *MetricClient { + return &MetricClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `metric.Hooks(f(g(h())))`. +func (c *MetricClient) Use(hooks ...Hook) { + c.hooks.Metric = append(c.hooks.Metric, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `metric.Intercept(f(g(h())))`. +func (c *MetricClient) Intercept(interceptors ...Interceptor) { + c.inters.Metric = append(c.inters.Metric, interceptors...) +} + +// Create returns a builder for creating a Metric entity. +func (c *MetricClient) Create() *MetricCreate { + mutation := newMetricMutation(c.config, OpCreate) + return &MetricCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Metric entities. 
+func (c *MetricClient) CreateBulk(builders ...*MetricCreate) *MetricCreateBulk { + return &MetricCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *MetricClient) MapCreateBulk(slice any, setFunc func(*MetricCreate, int)) *MetricCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &MetricCreateBulk{err: fmt.Errorf("calling to MetricClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*MetricCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &MetricCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Metric. +func (c *MetricClient) Update() *MetricUpdate { + mutation := newMetricMutation(c.config, OpUpdate) + return &MetricUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *MetricClient) UpdateOne(m *Metric) *MetricUpdateOne { + mutation := newMetricMutation(c.config, OpUpdateOne, withMetric(m)) + return &MetricUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *MetricClient) UpdateOneID(id int) *MetricUpdateOne { + mutation := newMetricMutation(c.config, OpUpdateOne, withMetricID(id)) + return &MetricUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Metric. +func (c *MetricClient) Delete() *MetricDelete { + mutation := newMetricMutation(c.config, OpDelete) + return &MetricDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. 
+func (c *MetricClient) DeleteOne(m *Metric) *MetricDeleteOne { + return c.DeleteOneID(m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *MetricClient) DeleteOneID(id int) *MetricDeleteOne { + builder := c.Delete().Where(metric.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &MetricDeleteOne{builder} +} + +// Query returns a query builder for Metric. +func (c *MetricClient) Query() *MetricQuery { + return &MetricQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeMetric}, + inters: c.Interceptors(), + } +} + +// Get returns a Metric entity by its id. +func (c *MetricClient) Get(ctx context.Context, id int) (*Metric, error) { + return c.Query().Where(metric.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *MetricClient) GetX(ctx context.Context, id int) *Metric { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *MetricClient) Hooks() []Hook { + return c.hooks.Metric +} + +// Interceptors returns the client interceptors. +func (c *MetricClient) Interceptors() []Interceptor { + return c.inters.Metric +} + +func (c *MetricClient) mutate(ctx context.Context, m *MetricMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&MetricCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&MetricUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&MetricUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&MetricDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Metric mutation op: %q", m.Op()) + } +} + // hooks and interceptors per client, for fast access. 
type ( hooks struct { - Alert, Bouncer, ConfigItem, Decision, Event, Lock, Machine, Meta []ent.Hook + Alert, Bouncer, ConfigItem, Decision, Event, Lock, Machine, Meta, + Metric []ent.Hook } inters struct { - Alert, Bouncer, ConfigItem, Decision, Event, Lock, Machine, - Meta []ent.Interceptor + Alert, Bouncer, ConfigItem, Decision, Event, Lock, Machine, Meta, + Metric []ent.Interceptor } ) diff --git a/pkg/database/ent/configitem.go b/pkg/database/ent/configitem.go index 467e54386f6..bdf23ef4948 100644 --- a/pkg/database/ent/configitem.go +++ b/pkg/database/ent/configitem.go @@ -18,9 +18,9 @@ type ConfigItem struct { // ID of the ent. ID int `json:"id,omitempty"` // CreatedAt holds the value of the "created_at" field. - CreatedAt *time.Time `json:"created_at"` + CreatedAt time.Time `json:"created_at"` // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt *time.Time `json:"updated_at"` + UpdatedAt time.Time `json:"updated_at"` // Name holds the value of the "name" field. Name string `json:"name"` // Value holds the value of the "value" field. 
@@ -64,15 +64,13 @@ func (ci *ConfigItem) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - ci.CreatedAt = new(time.Time) - *ci.CreatedAt = value.Time + ci.CreatedAt = value.Time } case configitem.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - ci.UpdatedAt = new(time.Time) - *ci.UpdatedAt = value.Time + ci.UpdatedAt = value.Time } case configitem.FieldName: if value, ok := values[i].(*sql.NullString); !ok { @@ -122,15 +120,11 @@ func (ci *ConfigItem) String() string { var builder strings.Builder builder.WriteString("ConfigItem(") builder.WriteString(fmt.Sprintf("id=%v, ", ci.ID)) - if v := ci.CreatedAt; v != nil { - builder.WriteString("created_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("created_at=") + builder.WriteString(ci.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := ci.UpdatedAt; v != nil { - builder.WriteString("updated_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("updated_at=") + builder.WriteString(ci.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("name=") builder.WriteString(ci.Name) diff --git a/pkg/database/ent/configitem/configitem.go b/pkg/database/ent/configitem/configitem.go index a6ff6c32d57..611d81a3960 100644 --- a/pkg/database/ent/configitem/configitem.go +++ b/pkg/database/ent/configitem/configitem.go @@ -47,8 +47,6 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time - // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. 
- UpdateDefaultCreatedAt func() time.Time // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. diff --git a/pkg/database/ent/configitem/where.go b/pkg/database/ent/configitem/where.go index 767f0b420f1..48ae792fd72 100644 --- a/pkg/database/ent/configitem/where.go +++ b/pkg/database/ent/configitem/where.go @@ -114,16 +114,6 @@ func CreatedAtLTE(v time.Time) predicate.ConfigItem { return predicate.ConfigItem(sql.FieldLTE(FieldCreatedAt, v)) } -// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. -func CreatedAtIsNil() predicate.ConfigItem { - return predicate.ConfigItem(sql.FieldIsNull(FieldCreatedAt)) -} - -// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. -func CreatedAtNotNil() predicate.ConfigItem { - return predicate.ConfigItem(sql.FieldNotNull(FieldCreatedAt)) -} - // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.ConfigItem { return predicate.ConfigItem(sql.FieldEQ(FieldUpdatedAt, v)) @@ -164,16 +154,6 @@ func UpdatedAtLTE(v time.Time) predicate.ConfigItem { return predicate.ConfigItem(sql.FieldLTE(FieldUpdatedAt, v)) } -// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. -func UpdatedAtIsNil() predicate.ConfigItem { - return predicate.ConfigItem(sql.FieldIsNull(FieldUpdatedAt)) -} - -// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. -func UpdatedAtNotNil() predicate.ConfigItem { - return predicate.ConfigItem(sql.FieldNotNull(FieldUpdatedAt)) -} - // NameEQ applies the EQ predicate on the "name" field. 
func NameEQ(v string) predicate.ConfigItem { return predicate.ConfigItem(sql.FieldEQ(FieldName, v)) diff --git a/pkg/database/ent/configitem_create.go b/pkg/database/ent/configitem_create.go index 19e73dea41c..a2679927aee 100644 --- a/pkg/database/ent/configitem_create.go +++ b/pkg/database/ent/configitem_create.go @@ -107,6 +107,12 @@ func (cic *ConfigItemCreate) defaults() { // check runs all checks and user-defined validators on the builder. func (cic *ConfigItemCreate) check() error { + if _, ok := cic.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "ConfigItem.created_at"`)} + } + if _, ok := cic.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "ConfigItem.updated_at"`)} + } if _, ok := cic.mutation.Name(); !ok { return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "ConfigItem.name"`)} } @@ -141,11 +147,11 @@ func (cic *ConfigItemCreate) createSpec() (*ConfigItem, *sqlgraph.CreateSpec) { ) if value, ok := cic.mutation.CreatedAt(); ok { _spec.SetField(configitem.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = &value + _node.CreatedAt = value } if value, ok := cic.mutation.UpdatedAt(); ok { _spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = &value + _node.UpdatedAt = value } if value, ok := cic.mutation.Name(); ok { _spec.SetField(configitem.FieldName, field.TypeString, value) diff --git a/pkg/database/ent/configitem_update.go b/pkg/database/ent/configitem_update.go index 11fb0755191..d4f1f15d23a 100644 --- a/pkg/database/ent/configitem_update.go +++ b/pkg/database/ent/configitem_update.go @@ -28,30 +28,12 @@ func (ciu *ConfigItemUpdate) Where(ps ...predicate.ConfigItem) *ConfigItemUpdate return ciu } -// SetCreatedAt sets the "created_at" field. 
-func (ciu *ConfigItemUpdate) SetCreatedAt(t time.Time) *ConfigItemUpdate { - ciu.mutation.SetCreatedAt(t) - return ciu -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (ciu *ConfigItemUpdate) ClearCreatedAt() *ConfigItemUpdate { - ciu.mutation.ClearCreatedAt() - return ciu -} - // SetUpdatedAt sets the "updated_at" field. func (ciu *ConfigItemUpdate) SetUpdatedAt(t time.Time) *ConfigItemUpdate { ciu.mutation.SetUpdatedAt(t) return ciu } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (ciu *ConfigItemUpdate) ClearUpdatedAt() *ConfigItemUpdate { - ciu.mutation.ClearUpdatedAt() - return ciu -} - // SetName sets the "name" field. func (ciu *ConfigItemUpdate) SetName(s string) *ConfigItemUpdate { ciu.mutation.SetName(s) @@ -115,11 +97,7 @@ func (ciu *ConfigItemUpdate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (ciu *ConfigItemUpdate) defaults() { - if _, ok := ciu.mutation.CreatedAt(); !ok && !ciu.mutation.CreatedAtCleared() { - v := configitem.UpdateDefaultCreatedAt() - ciu.mutation.SetCreatedAt(v) - } - if _, ok := ciu.mutation.UpdatedAt(); !ok && !ciu.mutation.UpdatedAtCleared() { + if _, ok := ciu.mutation.UpdatedAt(); !ok { v := configitem.UpdateDefaultUpdatedAt() ciu.mutation.SetUpdatedAt(v) } @@ -134,18 +112,9 @@ func (ciu *ConfigItemUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := ciu.mutation.CreatedAt(); ok { - _spec.SetField(configitem.FieldCreatedAt, field.TypeTime, value) - } - if ciu.mutation.CreatedAtCleared() { - _spec.ClearField(configitem.FieldCreatedAt, field.TypeTime) - } if value, ok := ciu.mutation.UpdatedAt(); ok { _spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value) } - if ciu.mutation.UpdatedAtCleared() { - _spec.ClearField(configitem.FieldUpdatedAt, field.TypeTime) - } if value, ok := ciu.mutation.Name(); ok { _spec.SetField(configitem.FieldName, field.TypeString, value) } @@ -172,30 +141,12 @@ type 
ConfigItemUpdateOne struct { mutation *ConfigItemMutation } -// SetCreatedAt sets the "created_at" field. -func (ciuo *ConfigItemUpdateOne) SetCreatedAt(t time.Time) *ConfigItemUpdateOne { - ciuo.mutation.SetCreatedAt(t) - return ciuo -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (ciuo *ConfigItemUpdateOne) ClearCreatedAt() *ConfigItemUpdateOne { - ciuo.mutation.ClearCreatedAt() - return ciuo -} - // SetUpdatedAt sets the "updated_at" field. func (ciuo *ConfigItemUpdateOne) SetUpdatedAt(t time.Time) *ConfigItemUpdateOne { ciuo.mutation.SetUpdatedAt(t) return ciuo } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (ciuo *ConfigItemUpdateOne) ClearUpdatedAt() *ConfigItemUpdateOne { - ciuo.mutation.ClearUpdatedAt() - return ciuo -} - // SetName sets the "name" field. func (ciuo *ConfigItemUpdateOne) SetName(s string) *ConfigItemUpdateOne { ciuo.mutation.SetName(s) @@ -272,11 +223,7 @@ func (ciuo *ConfigItemUpdateOne) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. 
func (ciuo *ConfigItemUpdateOne) defaults() { - if _, ok := ciuo.mutation.CreatedAt(); !ok && !ciuo.mutation.CreatedAtCleared() { - v := configitem.UpdateDefaultCreatedAt() - ciuo.mutation.SetCreatedAt(v) - } - if _, ok := ciuo.mutation.UpdatedAt(); !ok && !ciuo.mutation.UpdatedAtCleared() { + if _, ok := ciuo.mutation.UpdatedAt(); !ok { v := configitem.UpdateDefaultUpdatedAt() ciuo.mutation.SetUpdatedAt(v) } @@ -308,18 +255,9 @@ func (ciuo *ConfigItemUpdateOne) sqlSave(ctx context.Context) (_node *ConfigItem } } } - if value, ok := ciuo.mutation.CreatedAt(); ok { - _spec.SetField(configitem.FieldCreatedAt, field.TypeTime, value) - } - if ciuo.mutation.CreatedAtCleared() { - _spec.ClearField(configitem.FieldCreatedAt, field.TypeTime) - } if value, ok := ciuo.mutation.UpdatedAt(); ok { _spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value) } - if ciuo.mutation.UpdatedAtCleared() { - _spec.ClearField(configitem.FieldUpdatedAt, field.TypeTime) - } if value, ok := ciuo.mutation.Name(); ok { _spec.SetField(configitem.FieldName, field.TypeString, value) } diff --git a/pkg/database/ent/decision.go b/pkg/database/ent/decision.go index 8a08bc1dfd4..1cc0df4c784 100644 --- a/pkg/database/ent/decision.go +++ b/pkg/database/ent/decision.go @@ -19,9 +19,9 @@ type Decision struct { // ID of the ent. ID int `json:"id,omitempty"` // CreatedAt holds the value of the "created_at" field. - CreatedAt *time.Time `json:"created_at,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt *time.Time `json:"updated_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` // Until holds the value of the "until" field. Until *time.Time `json:"until,omitempty"` // Scenario holds the value of the "scenario" field. 
@@ -116,15 +116,13 @@ func (d *Decision) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - d.CreatedAt = new(time.Time) - *d.CreatedAt = value.Time + d.CreatedAt = value.Time } case decision.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - d.UpdatedAt = new(time.Time) - *d.UpdatedAt = value.Time + d.UpdatedAt = value.Time } case decision.FieldUntil: if value, ok := values[i].(*sql.NullTime); !ok { @@ -252,15 +250,11 @@ func (d *Decision) String() string { var builder strings.Builder builder.WriteString("Decision(") builder.WriteString(fmt.Sprintf("id=%v, ", d.ID)) - if v := d.CreatedAt; v != nil { - builder.WriteString("created_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("created_at=") + builder.WriteString(d.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := d.UpdatedAt; v != nil { - builder.WriteString("updated_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("updated_at=") + builder.WriteString(d.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") if v := d.Until; v != nil { builder.WriteString("until=") diff --git a/pkg/database/ent/decision/decision.go b/pkg/database/ent/decision/decision.go index d9f67623bd8..38c9721db48 100644 --- a/pkg/database/ent/decision/decision.go +++ b/pkg/database/ent/decision/decision.go @@ -93,8 +93,6 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time - // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. - UpdateDefaultCreatedAt func() time.Time // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. 
DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. diff --git a/pkg/database/ent/decision/where.go b/pkg/database/ent/decision/where.go index 36374f5714d..99a1889e63e 100644 --- a/pkg/database/ent/decision/where.go +++ b/pkg/database/ent/decision/where.go @@ -175,16 +175,6 @@ func CreatedAtLTE(v time.Time) predicate.Decision { return predicate.Decision(sql.FieldLTE(FieldCreatedAt, v)) } -// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. -func CreatedAtIsNil() predicate.Decision { - return predicate.Decision(sql.FieldIsNull(FieldCreatedAt)) -} - -// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. -func CreatedAtNotNil() predicate.Decision { - return predicate.Decision(sql.FieldNotNull(FieldCreatedAt)) -} - // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Decision { return predicate.Decision(sql.FieldEQ(FieldUpdatedAt, v)) @@ -225,16 +215,6 @@ func UpdatedAtLTE(v time.Time) predicate.Decision { return predicate.Decision(sql.FieldLTE(FieldUpdatedAt, v)) } -// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. -func UpdatedAtIsNil() predicate.Decision { - return predicate.Decision(sql.FieldIsNull(FieldUpdatedAt)) -} - -// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. -func UpdatedAtNotNil() predicate.Decision { - return predicate.Decision(sql.FieldNotNull(FieldUpdatedAt)) -} - // UntilEQ applies the EQ predicate on the "until" field. 
func UntilEQ(v time.Time) predicate.Decision { return predicate.Decision(sql.FieldEQ(FieldUntil, v)) diff --git a/pkg/database/ent/decision_create.go b/pkg/database/ent/decision_create.go index 43a28c53114..f30d5452120 100644 --- a/pkg/database/ent/decision_create.go +++ b/pkg/database/ent/decision_create.go @@ -275,6 +275,12 @@ func (dc *DecisionCreate) defaults() { // check runs all checks and user-defined validators on the builder. func (dc *DecisionCreate) check() error { + if _, ok := dc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Decision.created_at"`)} + } + if _, ok := dc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Decision.updated_at"`)} + } if _, ok := dc.mutation.Scenario(); !ok { return &ValidationError{Name: "scenario", err: errors.New(`ent: missing required field "Decision.scenario"`)} } @@ -321,11 +327,11 @@ func (dc *DecisionCreate) createSpec() (*Decision, *sqlgraph.CreateSpec) { ) if value, ok := dc.mutation.CreatedAt(); ok { _spec.SetField(decision.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = &value + _node.CreatedAt = value } if value, ok := dc.mutation.UpdatedAt(); ok { _spec.SetField(decision.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = &value + _node.UpdatedAt = value } if value, ok := dc.mutation.Until(); ok { _spec.SetField(decision.FieldUntil, field.TypeTime, value) diff --git a/pkg/database/ent/decision_update.go b/pkg/database/ent/decision_update.go index 182457e9f63..1bcb42f8c1f 100644 --- a/pkg/database/ent/decision_update.go +++ b/pkg/database/ent/decision_update.go @@ -29,30 +29,12 @@ func (du *DecisionUpdate) Where(ps ...predicate.Decision) *DecisionUpdate { return du } -// SetCreatedAt sets the "created_at" field. 
-func (du *DecisionUpdate) SetCreatedAt(t time.Time) *DecisionUpdate { - du.mutation.SetCreatedAt(t) - return du -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (du *DecisionUpdate) ClearCreatedAt() *DecisionUpdate { - du.mutation.ClearCreatedAt() - return du -} - // SetUpdatedAt sets the "updated_at" field. func (du *DecisionUpdate) SetUpdatedAt(t time.Time) *DecisionUpdate { du.mutation.SetUpdatedAt(t) return du } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (du *DecisionUpdate) ClearUpdatedAt() *DecisionUpdate { - du.mutation.ClearUpdatedAt() - return du -} - // SetUntil sets the "until" field. func (du *DecisionUpdate) SetUntil(t time.Time) *DecisionUpdate { du.mutation.SetUntil(t) @@ -392,11 +374,7 @@ func (du *DecisionUpdate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (du *DecisionUpdate) defaults() { - if _, ok := du.mutation.CreatedAt(); !ok && !du.mutation.CreatedAtCleared() { - v := decision.UpdateDefaultCreatedAt() - du.mutation.SetCreatedAt(v) - } - if _, ok := du.mutation.UpdatedAt(); !ok && !du.mutation.UpdatedAtCleared() { + if _, ok := du.mutation.UpdatedAt(); !ok { v := decision.UpdateDefaultUpdatedAt() du.mutation.SetUpdatedAt(v) } @@ -411,18 +389,9 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := du.mutation.CreatedAt(); ok { - _spec.SetField(decision.FieldCreatedAt, field.TypeTime, value) - } - if du.mutation.CreatedAtCleared() { - _spec.ClearField(decision.FieldCreatedAt, field.TypeTime) - } if value, ok := du.mutation.UpdatedAt(); ok { _spec.SetField(decision.FieldUpdatedAt, field.TypeTime, value) } - if du.mutation.UpdatedAtCleared() { - _spec.ClearField(decision.FieldUpdatedAt, field.TypeTime) - } if value, ok := du.mutation.Until(); ok { _spec.SetField(decision.FieldUntil, field.TypeTime, value) } @@ -547,30 +516,12 @@ type DecisionUpdateOne struct { mutation *DecisionMutation } -// 
SetCreatedAt sets the "created_at" field. -func (duo *DecisionUpdateOne) SetCreatedAt(t time.Time) *DecisionUpdateOne { - duo.mutation.SetCreatedAt(t) - return duo -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (duo *DecisionUpdateOne) ClearCreatedAt() *DecisionUpdateOne { - duo.mutation.ClearCreatedAt() - return duo -} - // SetUpdatedAt sets the "updated_at" field. func (duo *DecisionUpdateOne) SetUpdatedAt(t time.Time) *DecisionUpdateOne { duo.mutation.SetUpdatedAt(t) return duo } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (duo *DecisionUpdateOne) ClearUpdatedAt() *DecisionUpdateOne { - duo.mutation.ClearUpdatedAt() - return duo -} - // SetUntil sets the "until" field. func (duo *DecisionUpdateOne) SetUntil(t time.Time) *DecisionUpdateOne { duo.mutation.SetUntil(t) @@ -923,11 +874,7 @@ func (duo *DecisionUpdateOne) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (duo *DecisionUpdateOne) defaults() { - if _, ok := duo.mutation.CreatedAt(); !ok && !duo.mutation.CreatedAtCleared() { - v := decision.UpdateDefaultCreatedAt() - duo.mutation.SetCreatedAt(v) - } - if _, ok := duo.mutation.UpdatedAt(); !ok && !duo.mutation.UpdatedAtCleared() { + if _, ok := duo.mutation.UpdatedAt(); !ok { v := decision.UpdateDefaultUpdatedAt() duo.mutation.SetUpdatedAt(v) } @@ -959,18 +906,9 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err } } } - if value, ok := duo.mutation.CreatedAt(); ok { - _spec.SetField(decision.FieldCreatedAt, field.TypeTime, value) - } - if duo.mutation.CreatedAtCleared() { - _spec.ClearField(decision.FieldCreatedAt, field.TypeTime) - } if value, ok := duo.mutation.UpdatedAt(); ok { _spec.SetField(decision.FieldUpdatedAt, field.TypeTime, value) } - if duo.mutation.UpdatedAtCleared() { - _spec.ClearField(decision.FieldUpdatedAt, field.TypeTime) - } if value, ok := duo.mutation.Until(); ok { _spec.SetField(decision.FieldUntil, 
field.TypeTime, value) } diff --git a/pkg/database/ent/ent.go b/pkg/database/ent/ent.go index cb98ee9301c..2a5ad188197 100644 --- a/pkg/database/ent/ent.go +++ b/pkg/database/ent/ent.go @@ -20,6 +20,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" ) // ent aliases to avoid import conflicts in user's code. @@ -88,6 +89,7 @@ func checkColumn(table, column string) error { lock.Table: lock.ValidColumn, machine.Table: machine.ValidColumn, meta.Table: meta.ValidColumn, + metric.Table: metric.ValidColumn, }) }) return columnCheck(table, column) diff --git a/pkg/database/ent/event.go b/pkg/database/ent/event.go index df4a2d10c8b..10e6d01c9d5 100644 --- a/pkg/database/ent/event.go +++ b/pkg/database/ent/event.go @@ -19,9 +19,9 @@ type Event struct { // ID of the ent. ID int `json:"id,omitempty"` // CreatedAt holds the value of the "created_at" field. - CreatedAt *time.Time `json:"created_at,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt *time.Time `json:"updated_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` // Time holds the value of the "time" field. Time time.Time `json:"time,omitempty"` // Serialized holds the value of the "serialized" field. 
@@ -92,15 +92,13 @@ func (e *Event) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - e.CreatedAt = new(time.Time) - *e.CreatedAt = value.Time + e.CreatedAt = value.Time } case event.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - e.UpdatedAt = new(time.Time) - *e.UpdatedAt = value.Time + e.UpdatedAt = value.Time } case event.FieldTime: if value, ok := values[i].(*sql.NullTime); !ok { @@ -161,15 +159,11 @@ func (e *Event) String() string { var builder strings.Builder builder.WriteString("Event(") builder.WriteString(fmt.Sprintf("id=%v, ", e.ID)) - if v := e.CreatedAt; v != nil { - builder.WriteString("created_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("created_at=") + builder.WriteString(e.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := e.UpdatedAt; v != nil { - builder.WriteString("updated_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("updated_at=") + builder.WriteString(e.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("time=") builder.WriteString(e.Time.Format(time.ANSIC)) diff --git a/pkg/database/ent/event/event.go b/pkg/database/ent/event/event.go index 48f5a355824..c975a612669 100644 --- a/pkg/database/ent/event/event.go +++ b/pkg/database/ent/event/event.go @@ -60,8 +60,6 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time - // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. - UpdateDefaultCreatedAt func() time.Time // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. 
DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. diff --git a/pkg/database/ent/event/where.go b/pkg/database/ent/event/where.go index 238bea988bd..d420b125026 100644 --- a/pkg/database/ent/event/where.go +++ b/pkg/database/ent/event/where.go @@ -120,16 +120,6 @@ func CreatedAtLTE(v time.Time) predicate.Event { return predicate.Event(sql.FieldLTE(FieldCreatedAt, v)) } -// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. -func CreatedAtIsNil() predicate.Event { - return predicate.Event(sql.FieldIsNull(FieldCreatedAt)) -} - -// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. -func CreatedAtNotNil() predicate.Event { - return predicate.Event(sql.FieldNotNull(FieldCreatedAt)) -} - // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Event { return predicate.Event(sql.FieldEQ(FieldUpdatedAt, v)) @@ -170,16 +160,6 @@ func UpdatedAtLTE(v time.Time) predicate.Event { return predicate.Event(sql.FieldLTE(FieldUpdatedAt, v)) } -// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. -func UpdatedAtIsNil() predicate.Event { - return predicate.Event(sql.FieldIsNull(FieldUpdatedAt)) -} - -// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. -func UpdatedAtNotNil() predicate.Event { - return predicate.Event(sql.FieldNotNull(FieldUpdatedAt)) -} - // TimeEQ applies the EQ predicate on the "time" field. func TimeEQ(v time.Time) predicate.Event { return predicate.Event(sql.FieldEQ(FieldTime, v)) diff --git a/pkg/database/ent/event_create.go b/pkg/database/ent/event_create.go index 98194f2fd33..36747babe47 100644 --- a/pkg/database/ent/event_create.go +++ b/pkg/database/ent/event_create.go @@ -141,6 +141,12 @@ func (ec *EventCreate) defaults() { // check runs all checks and user-defined validators on the builder. 
func (ec *EventCreate) check() error { + if _, ok := ec.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Event.created_at"`)} + } + if _, ok := ec.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Event.updated_at"`)} + } if _, ok := ec.mutation.Time(); !ok { return &ValidationError{Name: "time", err: errors.New(`ent: missing required field "Event.time"`)} } @@ -180,11 +186,11 @@ func (ec *EventCreate) createSpec() (*Event, *sqlgraph.CreateSpec) { ) if value, ok := ec.mutation.CreatedAt(); ok { _spec.SetField(event.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = &value + _node.CreatedAt = value } if value, ok := ec.mutation.UpdatedAt(); ok { _spec.SetField(event.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = &value + _node.UpdatedAt = value } if value, ok := ec.mutation.Time(); ok { _spec.SetField(event.FieldTime, field.TypeTime, value) diff --git a/pkg/database/ent/event_update.go b/pkg/database/ent/event_update.go index a06178f79af..0bc8a7f9243 100644 --- a/pkg/database/ent/event_update.go +++ b/pkg/database/ent/event_update.go @@ -29,30 +29,12 @@ func (eu *EventUpdate) Where(ps ...predicate.Event) *EventUpdate { return eu } -// SetCreatedAt sets the "created_at" field. -func (eu *EventUpdate) SetCreatedAt(t time.Time) *EventUpdate { - eu.mutation.SetCreatedAt(t) - return eu -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (eu *EventUpdate) ClearCreatedAt() *EventUpdate { - eu.mutation.ClearCreatedAt() - return eu -} - // SetUpdatedAt sets the "updated_at" field. func (eu *EventUpdate) SetUpdatedAt(t time.Time) *EventUpdate { eu.mutation.SetUpdatedAt(t) return eu } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (eu *EventUpdate) ClearUpdatedAt() *EventUpdate { - eu.mutation.ClearUpdatedAt() - return eu -} - // SetTime sets the "time" field. 
func (eu *EventUpdate) SetTime(t time.Time) *EventUpdate { eu.mutation.SetTime(t) @@ -161,11 +143,7 @@ func (eu *EventUpdate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (eu *EventUpdate) defaults() { - if _, ok := eu.mutation.CreatedAt(); !ok && !eu.mutation.CreatedAtCleared() { - v := event.UpdateDefaultCreatedAt() - eu.mutation.SetCreatedAt(v) - } - if _, ok := eu.mutation.UpdatedAt(); !ok && !eu.mutation.UpdatedAtCleared() { + if _, ok := eu.mutation.UpdatedAt(); !ok { v := event.UpdateDefaultUpdatedAt() eu.mutation.SetUpdatedAt(v) } @@ -193,18 +171,9 @@ func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := eu.mutation.CreatedAt(); ok { - _spec.SetField(event.FieldCreatedAt, field.TypeTime, value) - } - if eu.mutation.CreatedAtCleared() { - _spec.ClearField(event.FieldCreatedAt, field.TypeTime) - } if value, ok := eu.mutation.UpdatedAt(); ok { _spec.SetField(event.FieldUpdatedAt, field.TypeTime, value) } - if eu.mutation.UpdatedAtCleared() { - _spec.ClearField(event.FieldUpdatedAt, field.TypeTime) - } if value, ok := eu.mutation.Time(); ok { _spec.SetField(event.FieldTime, field.TypeTime, value) } @@ -260,30 +229,12 @@ type EventUpdateOne struct { mutation *EventMutation } -// SetCreatedAt sets the "created_at" field. -func (euo *EventUpdateOne) SetCreatedAt(t time.Time) *EventUpdateOne { - euo.mutation.SetCreatedAt(t) - return euo -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (euo *EventUpdateOne) ClearCreatedAt() *EventUpdateOne { - euo.mutation.ClearCreatedAt() - return euo -} - // SetUpdatedAt sets the "updated_at" field. func (euo *EventUpdateOne) SetUpdatedAt(t time.Time) *EventUpdateOne { euo.mutation.SetUpdatedAt(t) return euo } -// ClearUpdatedAt clears the value of the "updated_at" field. 
-func (euo *EventUpdateOne) ClearUpdatedAt() *EventUpdateOne { - euo.mutation.ClearUpdatedAt() - return euo -} - // SetTime sets the "time" field. func (euo *EventUpdateOne) SetTime(t time.Time) *EventUpdateOne { euo.mutation.SetTime(t) @@ -405,11 +356,7 @@ func (euo *EventUpdateOne) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (euo *EventUpdateOne) defaults() { - if _, ok := euo.mutation.CreatedAt(); !ok && !euo.mutation.CreatedAtCleared() { - v := event.UpdateDefaultCreatedAt() - euo.mutation.SetCreatedAt(v) - } - if _, ok := euo.mutation.UpdatedAt(); !ok && !euo.mutation.UpdatedAtCleared() { + if _, ok := euo.mutation.UpdatedAt(); !ok { v := event.UpdateDefaultUpdatedAt() euo.mutation.SetUpdatedAt(v) } @@ -454,18 +401,9 @@ func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error } } } - if value, ok := euo.mutation.CreatedAt(); ok { - _spec.SetField(event.FieldCreatedAt, field.TypeTime, value) - } - if euo.mutation.CreatedAtCleared() { - _spec.ClearField(event.FieldCreatedAt, field.TypeTime) - } if value, ok := euo.mutation.UpdatedAt(); ok { _spec.SetField(event.FieldUpdatedAt, field.TypeTime, value) } - if euo.mutation.UpdatedAtCleared() { - _spec.ClearField(event.FieldUpdatedAt, field.TypeTime) - } if value, ok := euo.mutation.Time(); ok { _spec.SetField(event.FieldTime, field.TypeTime, value) } diff --git a/pkg/database/ent/hook/hook.go b/pkg/database/ent/hook/hook.go index fdc31539679..62cc07820d0 100644 --- a/pkg/database/ent/hook/hook.go +++ b/pkg/database/ent/hook/hook.go @@ -105,6 +105,18 @@ func (f MetaFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MetaMutation", m) } +// The MetricFunc type is an adapter to allow the use of ordinary +// function as Metric mutator. +type MetricFunc func(context.Context, *ent.MetricMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). 
+func (f MetricFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.MetricMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MetricMutation", m) +} + // Condition is a hook condition function. type Condition func(context.Context, ent.Mutation) bool diff --git a/pkg/database/ent/lock_update.go b/pkg/database/ent/lock_update.go index dc61dfdfde1..988363abd17 100644 --- a/pkg/database/ent/lock_update.go +++ b/pkg/database/ent/lock_update.go @@ -28,20 +28,6 @@ func (lu *LockUpdate) Where(ps ...predicate.Lock) *LockUpdate { return lu } -// SetName sets the "name" field. -func (lu *LockUpdate) SetName(s string) *LockUpdate { - lu.mutation.SetName(s) - return lu -} - -// SetNillableName sets the "name" field if the given value is not nil. -func (lu *LockUpdate) SetNillableName(s *string) *LockUpdate { - if s != nil { - lu.SetName(*s) - } - return lu -} - // SetCreatedAt sets the "created_at" field. func (lu *LockUpdate) SetCreatedAt(t time.Time) *LockUpdate { lu.mutation.SetCreatedAt(t) @@ -97,9 +83,6 @@ func (lu *LockUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := lu.mutation.Name(); ok { - _spec.SetField(lock.FieldName, field.TypeString, value) - } if value, ok := lu.mutation.CreatedAt(); ok { _spec.SetField(lock.FieldCreatedAt, field.TypeTime, value) } @@ -123,20 +106,6 @@ type LockUpdateOne struct { mutation *LockMutation } -// SetName sets the "name" field. -func (luo *LockUpdateOne) SetName(s string) *LockUpdateOne { - luo.mutation.SetName(s) - return luo -} - -// SetNillableName sets the "name" field if the given value is not nil. -func (luo *LockUpdateOne) SetNillableName(s *string) *LockUpdateOne { - if s != nil { - luo.SetName(*s) - } - return luo -} - // SetCreatedAt sets the "created_at" field. 
func (luo *LockUpdateOne) SetCreatedAt(t time.Time) *LockUpdateOne { luo.mutation.SetCreatedAt(t) @@ -222,9 +191,6 @@ func (luo *LockUpdateOne) sqlSave(ctx context.Context) (_node *Lock, err error) } } } - if value, ok := luo.mutation.Name(); ok { - _spec.SetField(lock.FieldName, field.TypeString, value) - } if value, ok := luo.mutation.CreatedAt(); ok { _spec.SetField(lock.FieldCreatedAt, field.TypeTime, value) } diff --git a/pkg/database/ent/machine.go b/pkg/database/ent/machine.go index 346a8d084ba..f2cc45c6abc 100644 --- a/pkg/database/ent/machine.go +++ b/pkg/database/ent/machine.go @@ -3,6 +3,7 @@ package ent import ( + "encoding/json" "fmt" "strings" "time" @@ -10,6 +11,7 @@ import ( "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/models" ) // Machine is the model entity for the Machine schema. @@ -18,9 +20,9 @@ type Machine struct { // ID of the ent. ID int `json:"id,omitempty"` // CreatedAt holds the value of the "created_at" field. - CreatedAt *time.Time `json:"created_at,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt *time.Time `json:"updated_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` // LastPush holds the value of the "last_push" field. LastPush *time.Time `json:"last_push,omitempty"` // LastHeartbeat holds the value of the "last_heartbeat" field. @@ -41,6 +43,14 @@ type Machine struct { Status string `json:"status,omitempty"` // AuthType holds the value of the "auth_type" field. AuthType string `json:"auth_type"` + // Osname holds the value of the "osname" field. + Osname string `json:"osname,omitempty"` + // Osversion holds the value of the "osversion" field. + Osversion string `json:"osversion,omitempty"` + // Featureflags holds the value of the "featureflags" field. 
+ Featureflags string `json:"featureflags,omitempty"` + // Hubstate holds the value of the "hubstate" field. + Hubstate *models.HubItems `json:"hubstate,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the MachineQuery when eager-loading is set. Edges MachineEdges `json:"edges"` @@ -70,11 +80,13 @@ func (*Machine) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { + case machine.FieldHubstate: + values[i] = new([]byte) case machine.FieldIsValidated: values[i] = new(sql.NullBool) case machine.FieldID: values[i] = new(sql.NullInt64) - case machine.FieldMachineId, machine.FieldPassword, machine.FieldIpAddress, machine.FieldScenarios, machine.FieldVersion, machine.FieldStatus, machine.FieldAuthType: + case machine.FieldMachineId, machine.FieldPassword, machine.FieldIpAddress, machine.FieldScenarios, machine.FieldVersion, machine.FieldStatus, machine.FieldAuthType, machine.FieldOsname, machine.FieldOsversion, machine.FieldFeatureflags: values[i] = new(sql.NullString) case machine.FieldCreatedAt, machine.FieldUpdatedAt, machine.FieldLastPush, machine.FieldLastHeartbeat: values[i] = new(sql.NullTime) @@ -103,15 +115,13 @@ func (m *Machine) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - m.CreatedAt = new(time.Time) - *m.CreatedAt = value.Time + m.CreatedAt = value.Time } case machine.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - m.UpdatedAt = new(time.Time) - *m.UpdatedAt = value.Time + m.UpdatedAt = value.Time } case machine.FieldLastPush: if value, ok := values[i].(*sql.NullTime); !ok { @@ -175,6 +185,32 @@ func (m *Machine) assignValues(columns []string, values 
[]any) error { } else if value.Valid { m.AuthType = value.String } + case machine.FieldOsname: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field osname", values[i]) + } else if value.Valid { + m.Osname = value.String + } + case machine.FieldOsversion: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field osversion", values[i]) + } else if value.Valid { + m.Osversion = value.String + } + case machine.FieldFeatureflags: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field featureflags", values[i]) + } else if value.Valid { + m.Featureflags = value.String + } + case machine.FieldHubstate: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field hubstate", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &m.Hubstate); err != nil { + return fmt.Errorf("unmarshal field hubstate: %w", err) + } + } default: m.selectValues.Set(columns[i], values[i]) } @@ -216,15 +252,11 @@ func (m *Machine) String() string { var builder strings.Builder builder.WriteString("Machine(") builder.WriteString(fmt.Sprintf("id=%v, ", m.ID)) - if v := m.CreatedAt; v != nil { - builder.WriteString("created_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("created_at=") + builder.WriteString(m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := m.UpdatedAt; v != nil { - builder.WriteString("updated_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("updated_at=") + builder.WriteString(m.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") if v := m.LastPush; v != nil { builder.WriteString("last_push=") @@ -258,6 +290,18 @@ func (m *Machine) String() string { builder.WriteString(", ") builder.WriteString("auth_type=") builder.WriteString(m.AuthType) + builder.WriteString(", ") + 
builder.WriteString("osname=") + builder.WriteString(m.Osname) + builder.WriteString(", ") + builder.WriteString("osversion=") + builder.WriteString(m.Osversion) + builder.WriteString(", ") + builder.WriteString("featureflags=") + builder.WriteString(m.Featureflags) + builder.WriteString(", ") + builder.WriteString("hubstate=") + builder.WriteString(fmt.Sprintf("%v", m.Hubstate)) builder.WriteByte(')') return builder.String() } diff --git a/pkg/database/ent/machine/machine.go b/pkg/database/ent/machine/machine.go index 5456935e04c..39ea01b3c16 100644 --- a/pkg/database/ent/machine/machine.go +++ b/pkg/database/ent/machine/machine.go @@ -38,6 +38,14 @@ const ( FieldStatus = "status" // FieldAuthType holds the string denoting the auth_type field in the database. FieldAuthType = "auth_type" + // FieldOsname holds the string denoting the osname field in the database. + FieldOsname = "osname" + // FieldOsversion holds the string denoting the osversion field in the database. + FieldOsversion = "osversion" + // FieldFeatureflags holds the string denoting the featureflags field in the database. + FieldFeatureflags = "featureflags" + // FieldHubstate holds the string denoting the hubstate field in the database. + FieldHubstate = "hubstate" // EdgeAlerts holds the string denoting the alerts edge name in mutations. EdgeAlerts = "alerts" // Table holds the table name of the machine in the database. @@ -66,6 +74,10 @@ var Columns = []string{ FieldIsValidated, FieldStatus, FieldAuthType, + FieldOsname, + FieldOsversion, + FieldFeatureflags, + FieldHubstate, } // ValidColumn reports if the column name is valid (part of the table columns). @@ -81,8 +93,6 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time - // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. 
- UpdateDefaultCreatedAt func() time.Time // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. @@ -171,6 +181,21 @@ func ByAuthType(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldAuthType, opts...).ToFunc() } +// ByOsname orders the results by the osname field. +func ByOsname(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOsname, opts...).ToFunc() +} + +// ByOsversion orders the results by the osversion field. +func ByOsversion(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOsversion, opts...).ToFunc() +} + +// ByFeatureflags orders the results by the featureflags field. +func ByFeatureflags(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFeatureflags, opts...).ToFunc() +} + // ByAlertsCount orders the results by alerts count. func ByAlertsCount(opts ...sql.OrderTermOption) OrderOption { return func(s *sql.Selector) { diff --git a/pkg/database/ent/machine/where.go b/pkg/database/ent/machine/where.go index e9d00e7e01e..c84ad30f240 100644 --- a/pkg/database/ent/machine/where.go +++ b/pkg/database/ent/machine/where.go @@ -115,6 +115,21 @@ func AuthType(v string) predicate.Machine { return predicate.Machine(sql.FieldEQ(FieldAuthType, v)) } +// Osname applies equality check predicate on the "osname" field. It's identical to OsnameEQ. +func Osname(v string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldOsname, v)) +} + +// Osversion applies equality check predicate on the "osversion" field. It's identical to OsversionEQ. +func Osversion(v string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldOsversion, v)) +} + +// Featureflags applies equality check predicate on the "featureflags" field. It's identical to FeatureflagsEQ. 
+func Featureflags(v string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldFeatureflags, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Machine { return predicate.Machine(sql.FieldEQ(FieldCreatedAt, v)) @@ -155,16 +170,6 @@ func CreatedAtLTE(v time.Time) predicate.Machine { return predicate.Machine(sql.FieldLTE(FieldCreatedAt, v)) } -// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. -func CreatedAtIsNil() predicate.Machine { - return predicate.Machine(sql.FieldIsNull(FieldCreatedAt)) -} - -// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. -func CreatedAtNotNil() predicate.Machine { - return predicate.Machine(sql.FieldNotNull(FieldCreatedAt)) -} - // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Machine { return predicate.Machine(sql.FieldEQ(FieldUpdatedAt, v)) @@ -205,16 +210,6 @@ func UpdatedAtLTE(v time.Time) predicate.Machine { return predicate.Machine(sql.FieldLTE(FieldUpdatedAt, v)) } -// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. -func UpdatedAtIsNil() predicate.Machine { - return predicate.Machine(sql.FieldIsNull(FieldUpdatedAt)) -} - -// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. -func UpdatedAtNotNil() predicate.Machine { - return predicate.Machine(sql.FieldNotNull(FieldUpdatedAt)) -} - // LastPushEQ applies the EQ predicate on the "last_push" field. func LastPushEQ(v time.Time) predicate.Machine { return predicate.Machine(sql.FieldEQ(FieldLastPush, v)) @@ -810,6 +805,241 @@ func AuthTypeContainsFold(v string) predicate.Machine { return predicate.Machine(sql.FieldContainsFold(FieldAuthType, v)) } +// OsnameEQ applies the EQ predicate on the "osname" field. 
+func OsnameEQ(v string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldOsname, v)) +} + +// OsnameNEQ applies the NEQ predicate on the "osname" field. +func OsnameNEQ(v string) predicate.Machine { + return predicate.Machine(sql.FieldNEQ(FieldOsname, v)) +} + +// OsnameIn applies the In predicate on the "osname" field. +func OsnameIn(vs ...string) predicate.Machine { + return predicate.Machine(sql.FieldIn(FieldOsname, vs...)) +} + +// OsnameNotIn applies the NotIn predicate on the "osname" field. +func OsnameNotIn(vs ...string) predicate.Machine { + return predicate.Machine(sql.FieldNotIn(FieldOsname, vs...)) +} + +// OsnameGT applies the GT predicate on the "osname" field. +func OsnameGT(v string) predicate.Machine { + return predicate.Machine(sql.FieldGT(FieldOsname, v)) +} + +// OsnameGTE applies the GTE predicate on the "osname" field. +func OsnameGTE(v string) predicate.Machine { + return predicate.Machine(sql.FieldGTE(FieldOsname, v)) +} + +// OsnameLT applies the LT predicate on the "osname" field. +func OsnameLT(v string) predicate.Machine { + return predicate.Machine(sql.FieldLT(FieldOsname, v)) +} + +// OsnameLTE applies the LTE predicate on the "osname" field. +func OsnameLTE(v string) predicate.Machine { + return predicate.Machine(sql.FieldLTE(FieldOsname, v)) +} + +// OsnameContains applies the Contains predicate on the "osname" field. +func OsnameContains(v string) predicate.Machine { + return predicate.Machine(sql.FieldContains(FieldOsname, v)) +} + +// OsnameHasPrefix applies the HasPrefix predicate on the "osname" field. +func OsnameHasPrefix(v string) predicate.Machine { + return predicate.Machine(sql.FieldHasPrefix(FieldOsname, v)) +} + +// OsnameHasSuffix applies the HasSuffix predicate on the "osname" field. +func OsnameHasSuffix(v string) predicate.Machine { + return predicate.Machine(sql.FieldHasSuffix(FieldOsname, v)) +} + +// OsnameIsNil applies the IsNil predicate on the "osname" field. 
+func OsnameIsNil() predicate.Machine { + return predicate.Machine(sql.FieldIsNull(FieldOsname)) +} + +// OsnameNotNil applies the NotNil predicate on the "osname" field. +func OsnameNotNil() predicate.Machine { + return predicate.Machine(sql.FieldNotNull(FieldOsname)) +} + +// OsnameEqualFold applies the EqualFold predicate on the "osname" field. +func OsnameEqualFold(v string) predicate.Machine { + return predicate.Machine(sql.FieldEqualFold(FieldOsname, v)) +} + +// OsnameContainsFold applies the ContainsFold predicate on the "osname" field. +func OsnameContainsFold(v string) predicate.Machine { + return predicate.Machine(sql.FieldContainsFold(FieldOsname, v)) +} + +// OsversionEQ applies the EQ predicate on the "osversion" field. +func OsversionEQ(v string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldOsversion, v)) +} + +// OsversionNEQ applies the NEQ predicate on the "osversion" field. +func OsversionNEQ(v string) predicate.Machine { + return predicate.Machine(sql.FieldNEQ(FieldOsversion, v)) +} + +// OsversionIn applies the In predicate on the "osversion" field. +func OsversionIn(vs ...string) predicate.Machine { + return predicate.Machine(sql.FieldIn(FieldOsversion, vs...)) +} + +// OsversionNotIn applies the NotIn predicate on the "osversion" field. +func OsversionNotIn(vs ...string) predicate.Machine { + return predicate.Machine(sql.FieldNotIn(FieldOsversion, vs...)) +} + +// OsversionGT applies the GT predicate on the "osversion" field. +func OsversionGT(v string) predicate.Machine { + return predicate.Machine(sql.FieldGT(FieldOsversion, v)) +} + +// OsversionGTE applies the GTE predicate on the "osversion" field. +func OsversionGTE(v string) predicate.Machine { + return predicate.Machine(sql.FieldGTE(FieldOsversion, v)) +} + +// OsversionLT applies the LT predicate on the "osversion" field. 
+func OsversionLT(v string) predicate.Machine { + return predicate.Machine(sql.FieldLT(FieldOsversion, v)) +} + +// OsversionLTE applies the LTE predicate on the "osversion" field. +func OsversionLTE(v string) predicate.Machine { + return predicate.Machine(sql.FieldLTE(FieldOsversion, v)) +} + +// OsversionContains applies the Contains predicate on the "osversion" field. +func OsversionContains(v string) predicate.Machine { + return predicate.Machine(sql.FieldContains(FieldOsversion, v)) +} + +// OsversionHasPrefix applies the HasPrefix predicate on the "osversion" field. +func OsversionHasPrefix(v string) predicate.Machine { + return predicate.Machine(sql.FieldHasPrefix(FieldOsversion, v)) +} + +// OsversionHasSuffix applies the HasSuffix predicate on the "osversion" field. +func OsversionHasSuffix(v string) predicate.Machine { + return predicate.Machine(sql.FieldHasSuffix(FieldOsversion, v)) +} + +// OsversionIsNil applies the IsNil predicate on the "osversion" field. +func OsversionIsNil() predicate.Machine { + return predicate.Machine(sql.FieldIsNull(FieldOsversion)) +} + +// OsversionNotNil applies the NotNil predicate on the "osversion" field. +func OsversionNotNil() predicate.Machine { + return predicate.Machine(sql.FieldNotNull(FieldOsversion)) +} + +// OsversionEqualFold applies the EqualFold predicate on the "osversion" field. +func OsversionEqualFold(v string) predicate.Machine { + return predicate.Machine(sql.FieldEqualFold(FieldOsversion, v)) +} + +// OsversionContainsFold applies the ContainsFold predicate on the "osversion" field. +func OsversionContainsFold(v string) predicate.Machine { + return predicate.Machine(sql.FieldContainsFold(FieldOsversion, v)) +} + +// FeatureflagsEQ applies the EQ predicate on the "featureflags" field. +func FeatureflagsEQ(v string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldFeatureflags, v)) +} + +// FeatureflagsNEQ applies the NEQ predicate on the "featureflags" field. 
+func FeatureflagsNEQ(v string) predicate.Machine { + return predicate.Machine(sql.FieldNEQ(FieldFeatureflags, v)) +} + +// FeatureflagsIn applies the In predicate on the "featureflags" field. +func FeatureflagsIn(vs ...string) predicate.Machine { + return predicate.Machine(sql.FieldIn(FieldFeatureflags, vs...)) +} + +// FeatureflagsNotIn applies the NotIn predicate on the "featureflags" field. +func FeatureflagsNotIn(vs ...string) predicate.Machine { + return predicate.Machine(sql.FieldNotIn(FieldFeatureflags, vs...)) +} + +// FeatureflagsGT applies the GT predicate on the "featureflags" field. +func FeatureflagsGT(v string) predicate.Machine { + return predicate.Machine(sql.FieldGT(FieldFeatureflags, v)) +} + +// FeatureflagsGTE applies the GTE predicate on the "featureflags" field. +func FeatureflagsGTE(v string) predicate.Machine { + return predicate.Machine(sql.FieldGTE(FieldFeatureflags, v)) +} + +// FeatureflagsLT applies the LT predicate on the "featureflags" field. +func FeatureflagsLT(v string) predicate.Machine { + return predicate.Machine(sql.FieldLT(FieldFeatureflags, v)) +} + +// FeatureflagsLTE applies the LTE predicate on the "featureflags" field. +func FeatureflagsLTE(v string) predicate.Machine { + return predicate.Machine(sql.FieldLTE(FieldFeatureflags, v)) +} + +// FeatureflagsContains applies the Contains predicate on the "featureflags" field. +func FeatureflagsContains(v string) predicate.Machine { + return predicate.Machine(sql.FieldContains(FieldFeatureflags, v)) +} + +// FeatureflagsHasPrefix applies the HasPrefix predicate on the "featureflags" field. +func FeatureflagsHasPrefix(v string) predicate.Machine { + return predicate.Machine(sql.FieldHasPrefix(FieldFeatureflags, v)) +} + +// FeatureflagsHasSuffix applies the HasSuffix predicate on the "featureflags" field. 
+func FeatureflagsHasSuffix(v string) predicate.Machine { + return predicate.Machine(sql.FieldHasSuffix(FieldFeatureflags, v)) +} + +// FeatureflagsIsNil applies the IsNil predicate on the "featureflags" field. +func FeatureflagsIsNil() predicate.Machine { + return predicate.Machine(sql.FieldIsNull(FieldFeatureflags)) +} + +// FeatureflagsNotNil applies the NotNil predicate on the "featureflags" field. +func FeatureflagsNotNil() predicate.Machine { + return predicate.Machine(sql.FieldNotNull(FieldFeatureflags)) +} + +// FeatureflagsEqualFold applies the EqualFold predicate on the "featureflags" field. +func FeatureflagsEqualFold(v string) predicate.Machine { + return predicate.Machine(sql.FieldEqualFold(FieldFeatureflags, v)) +} + +// FeatureflagsContainsFold applies the ContainsFold predicate on the "featureflags" field. +func FeatureflagsContainsFold(v string) predicate.Machine { + return predicate.Machine(sql.FieldContainsFold(FieldFeatureflags, v)) +} + +// HubstateIsNil applies the IsNil predicate on the "hubstate" field. +func HubstateIsNil() predicate.Machine { + return predicate.Machine(sql.FieldIsNull(FieldHubstate)) +} + +// HubstateNotNil applies the NotNil predicate on the "hubstate" field. +func HubstateNotNil() predicate.Machine { + return predicate.Machine(sql.FieldNotNull(FieldHubstate)) +} + // HasAlerts applies the HasEdge predicate on the "alerts" edge. func HasAlerts() predicate.Machine { return predicate.Machine(func(s *sql.Selector) { diff --git a/pkg/database/ent/machine_create.go b/pkg/database/ent/machine_create.go index ff704e6ab74..d6bd53a3d91 100644 --- a/pkg/database/ent/machine_create.go +++ b/pkg/database/ent/machine_create.go @@ -12,6 +12,7 @@ import ( "entgo.io/ent/schema/field" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/models" ) // MachineCreate is the builder for creating a Machine entity. 
@@ -165,6 +166,54 @@ func (mc *MachineCreate) SetNillableAuthType(s *string) *MachineCreate { return mc } +// SetOsname sets the "osname" field. +func (mc *MachineCreate) SetOsname(s string) *MachineCreate { + mc.mutation.SetOsname(s) + return mc +} + +// SetNillableOsname sets the "osname" field if the given value is not nil. +func (mc *MachineCreate) SetNillableOsname(s *string) *MachineCreate { + if s != nil { + mc.SetOsname(*s) + } + return mc +} + +// SetOsversion sets the "osversion" field. +func (mc *MachineCreate) SetOsversion(s string) *MachineCreate { + mc.mutation.SetOsversion(s) + return mc +} + +// SetNillableOsversion sets the "osversion" field if the given value is not nil. +func (mc *MachineCreate) SetNillableOsversion(s *string) *MachineCreate { + if s != nil { + mc.SetOsversion(*s) + } + return mc +} + +// SetFeatureflags sets the "featureflags" field. +func (mc *MachineCreate) SetFeatureflags(s string) *MachineCreate { + mc.mutation.SetFeatureflags(s) + return mc +} + +// SetNillableFeatureflags sets the "featureflags" field if the given value is not nil. +func (mc *MachineCreate) SetNillableFeatureflags(s *string) *MachineCreate { + if s != nil { + mc.SetFeatureflags(*s) + } + return mc +} + +// SetHubstate sets the "hubstate" field. +func (mc *MachineCreate) SetHubstate(mi *models.HubItems) *MachineCreate { + mc.mutation.SetHubstate(mi) + return mc +} + // AddAlertIDs adds the "alerts" edge to the Alert entity by IDs. func (mc *MachineCreate) AddAlertIDs(ids ...int) *MachineCreate { mc.mutation.AddAlertIDs(ids...) @@ -243,6 +292,12 @@ func (mc *MachineCreate) defaults() { // check runs all checks and user-defined validators on the builder. 
func (mc *MachineCreate) check() error { + if _, ok := mc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Machine.created_at"`)} + } + if _, ok := mc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Machine.updated_at"`)} + } if _, ok := mc.mutation.MachineId(); !ok { return &ValidationError{Name: "machineId", err: errors.New(`ent: missing required field "Machine.machineId"`)} } @@ -291,11 +346,11 @@ func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) { ) if value, ok := mc.mutation.CreatedAt(); ok { _spec.SetField(machine.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = &value + _node.CreatedAt = value } if value, ok := mc.mutation.UpdatedAt(); ok { _spec.SetField(machine.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = &value + _node.UpdatedAt = value } if value, ok := mc.mutation.LastPush(); ok { _spec.SetField(machine.FieldLastPush, field.TypeTime, value) @@ -337,6 +392,22 @@ func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) { _spec.SetField(machine.FieldAuthType, field.TypeString, value) _node.AuthType = value } + if value, ok := mc.mutation.Osname(); ok { + _spec.SetField(machine.FieldOsname, field.TypeString, value) + _node.Osname = value + } + if value, ok := mc.mutation.Osversion(); ok { + _spec.SetField(machine.FieldOsversion, field.TypeString, value) + _node.Osversion = value + } + if value, ok := mc.mutation.Featureflags(); ok { + _spec.SetField(machine.FieldFeatureflags, field.TypeString, value) + _node.Featureflags = value + } + if value, ok := mc.mutation.Hubstate(); ok { + _spec.SetField(machine.FieldHubstate, field.TypeJSON, value) + _node.Hubstate = value + } if nodes := mc.mutation.AlertsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, diff --git a/pkg/database/ent/machine_update.go b/pkg/database/ent/machine_update.go index 
1f87ac04d6f..2a70ac70f36 100644 --- a/pkg/database/ent/machine_update.go +++ b/pkg/database/ent/machine_update.go @@ -14,6 +14,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/crowdsecurity/crowdsec/pkg/models" ) // MachineUpdate is the builder for updating Machine entities. @@ -29,30 +30,12 @@ func (mu *MachineUpdate) Where(ps ...predicate.Machine) *MachineUpdate { return mu } -// SetCreatedAt sets the "created_at" field. -func (mu *MachineUpdate) SetCreatedAt(t time.Time) *MachineUpdate { - mu.mutation.SetCreatedAt(t) - return mu -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (mu *MachineUpdate) ClearCreatedAt() *MachineUpdate { - mu.mutation.ClearCreatedAt() - return mu -} - // SetUpdatedAt sets the "updated_at" field. func (mu *MachineUpdate) SetUpdatedAt(t time.Time) *MachineUpdate { mu.mutation.SetUpdatedAt(t) return mu } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (mu *MachineUpdate) ClearUpdatedAt() *MachineUpdate { - mu.mutation.ClearUpdatedAt() - return mu -} - // SetLastPush sets the "last_push" field. func (mu *MachineUpdate) SetLastPush(t time.Time) *MachineUpdate { mu.mutation.SetLastPush(t) @@ -77,20 +60,6 @@ func (mu *MachineUpdate) ClearLastHeartbeat() *MachineUpdate { return mu } -// SetMachineId sets the "machineId" field. -func (mu *MachineUpdate) SetMachineId(s string) *MachineUpdate { - mu.mutation.SetMachineId(s) - return mu -} - -// SetNillableMachineId sets the "machineId" field if the given value is not nil. -func (mu *MachineUpdate) SetNillableMachineId(s *string) *MachineUpdate { - if s != nil { - mu.SetMachineId(*s) - } - return mu -} - // SetPassword sets the "password" field. 
func (mu *MachineUpdate) SetPassword(s string) *MachineUpdate { mu.mutation.SetPassword(s) @@ -207,6 +176,78 @@ func (mu *MachineUpdate) SetNillableAuthType(s *string) *MachineUpdate { return mu } +// SetOsname sets the "osname" field. +func (mu *MachineUpdate) SetOsname(s string) *MachineUpdate { + mu.mutation.SetOsname(s) + return mu +} + +// SetNillableOsname sets the "osname" field if the given value is not nil. +func (mu *MachineUpdate) SetNillableOsname(s *string) *MachineUpdate { + if s != nil { + mu.SetOsname(*s) + } + return mu +} + +// ClearOsname clears the value of the "osname" field. +func (mu *MachineUpdate) ClearOsname() *MachineUpdate { + mu.mutation.ClearOsname() + return mu +} + +// SetOsversion sets the "osversion" field. +func (mu *MachineUpdate) SetOsversion(s string) *MachineUpdate { + mu.mutation.SetOsversion(s) + return mu +} + +// SetNillableOsversion sets the "osversion" field if the given value is not nil. +func (mu *MachineUpdate) SetNillableOsversion(s *string) *MachineUpdate { + if s != nil { + mu.SetOsversion(*s) + } + return mu +} + +// ClearOsversion clears the value of the "osversion" field. +func (mu *MachineUpdate) ClearOsversion() *MachineUpdate { + mu.mutation.ClearOsversion() + return mu +} + +// SetFeatureflags sets the "featureflags" field. +func (mu *MachineUpdate) SetFeatureflags(s string) *MachineUpdate { + mu.mutation.SetFeatureflags(s) + return mu +} + +// SetNillableFeatureflags sets the "featureflags" field if the given value is not nil. +func (mu *MachineUpdate) SetNillableFeatureflags(s *string) *MachineUpdate { + if s != nil { + mu.SetFeatureflags(*s) + } + return mu +} + +// ClearFeatureflags clears the value of the "featureflags" field. +func (mu *MachineUpdate) ClearFeatureflags() *MachineUpdate { + mu.mutation.ClearFeatureflags() + return mu +} + +// SetHubstate sets the "hubstate" field. 
+func (mu *MachineUpdate) SetHubstate(mi *models.HubItems) *MachineUpdate { + mu.mutation.SetHubstate(mi) + return mu +} + +// ClearHubstate clears the value of the "hubstate" field. +func (mu *MachineUpdate) ClearHubstate() *MachineUpdate { + mu.mutation.ClearHubstate() + return mu +} + // AddAlertIDs adds the "alerts" edge to the Alert entity by IDs. func (mu *MachineUpdate) AddAlertIDs(ids ...int) *MachineUpdate { mu.mutation.AddAlertIDs(ids...) @@ -278,11 +319,7 @@ func (mu *MachineUpdate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (mu *MachineUpdate) defaults() { - if _, ok := mu.mutation.CreatedAt(); !ok && !mu.mutation.CreatedAtCleared() { - v := machine.UpdateDefaultCreatedAt() - mu.mutation.SetCreatedAt(v) - } - if _, ok := mu.mutation.UpdatedAt(); !ok && !mu.mutation.UpdatedAtCleared() { + if _, ok := mu.mutation.UpdatedAt(); !ok { v := machine.UpdateDefaultUpdatedAt() mu.mutation.SetUpdatedAt(v) } @@ -318,18 +355,9 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := mu.mutation.CreatedAt(); ok { - _spec.SetField(machine.FieldCreatedAt, field.TypeTime, value) - } - if mu.mutation.CreatedAtCleared() { - _spec.ClearField(machine.FieldCreatedAt, field.TypeTime) - } if value, ok := mu.mutation.UpdatedAt(); ok { _spec.SetField(machine.FieldUpdatedAt, field.TypeTime, value) } - if mu.mutation.UpdatedAtCleared() { - _spec.ClearField(machine.FieldUpdatedAt, field.TypeTime) - } if value, ok := mu.mutation.LastPush(); ok { _spec.SetField(machine.FieldLastPush, field.TypeTime, value) } @@ -342,9 +370,6 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { if mu.mutation.LastHeartbeatCleared() { _spec.ClearField(machine.FieldLastHeartbeat, field.TypeTime) } - if value, ok := mu.mutation.MachineId(); ok { - _spec.SetField(machine.FieldMachineId, field.TypeString, value) - } if value, ok := mu.mutation.Password(); ok { 
_spec.SetField(machine.FieldPassword, field.TypeString, value) } @@ -375,6 +400,30 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := mu.mutation.AuthType(); ok { _spec.SetField(machine.FieldAuthType, field.TypeString, value) } + if value, ok := mu.mutation.Osname(); ok { + _spec.SetField(machine.FieldOsname, field.TypeString, value) + } + if mu.mutation.OsnameCleared() { + _spec.ClearField(machine.FieldOsname, field.TypeString) + } + if value, ok := mu.mutation.Osversion(); ok { + _spec.SetField(machine.FieldOsversion, field.TypeString, value) + } + if mu.mutation.OsversionCleared() { + _spec.ClearField(machine.FieldOsversion, field.TypeString) + } + if value, ok := mu.mutation.Featureflags(); ok { + _spec.SetField(machine.FieldFeatureflags, field.TypeString, value) + } + if mu.mutation.FeatureflagsCleared() { + _spec.ClearField(machine.FieldFeatureflags, field.TypeString) + } + if value, ok := mu.mutation.Hubstate(); ok { + _spec.SetField(machine.FieldHubstate, field.TypeJSON, value) + } + if mu.mutation.HubstateCleared() { + _spec.ClearField(machine.FieldHubstate, field.TypeJSON) + } if mu.mutation.AlertsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, @@ -440,30 +489,12 @@ type MachineUpdateOne struct { mutation *MachineMutation } -// SetCreatedAt sets the "created_at" field. -func (muo *MachineUpdateOne) SetCreatedAt(t time.Time) *MachineUpdateOne { - muo.mutation.SetCreatedAt(t) - return muo -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (muo *MachineUpdateOne) ClearCreatedAt() *MachineUpdateOne { - muo.mutation.ClearCreatedAt() - return muo -} - // SetUpdatedAt sets the "updated_at" field. func (muo *MachineUpdateOne) SetUpdatedAt(t time.Time) *MachineUpdateOne { muo.mutation.SetUpdatedAt(t) return muo } -// ClearUpdatedAt clears the value of the "updated_at" field. 
-func (muo *MachineUpdateOne) ClearUpdatedAt() *MachineUpdateOne { - muo.mutation.ClearUpdatedAt() - return muo -} - // SetLastPush sets the "last_push" field. func (muo *MachineUpdateOne) SetLastPush(t time.Time) *MachineUpdateOne { muo.mutation.SetLastPush(t) @@ -488,20 +519,6 @@ func (muo *MachineUpdateOne) ClearLastHeartbeat() *MachineUpdateOne { return muo } -// SetMachineId sets the "machineId" field. -func (muo *MachineUpdateOne) SetMachineId(s string) *MachineUpdateOne { - muo.mutation.SetMachineId(s) - return muo -} - -// SetNillableMachineId sets the "machineId" field if the given value is not nil. -func (muo *MachineUpdateOne) SetNillableMachineId(s *string) *MachineUpdateOne { - if s != nil { - muo.SetMachineId(*s) - } - return muo -} - // SetPassword sets the "password" field. func (muo *MachineUpdateOne) SetPassword(s string) *MachineUpdateOne { muo.mutation.SetPassword(s) @@ -618,6 +635,78 @@ func (muo *MachineUpdateOne) SetNillableAuthType(s *string) *MachineUpdateOne { return muo } +// SetOsname sets the "osname" field. +func (muo *MachineUpdateOne) SetOsname(s string) *MachineUpdateOne { + muo.mutation.SetOsname(s) + return muo +} + +// SetNillableOsname sets the "osname" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableOsname(s *string) *MachineUpdateOne { + if s != nil { + muo.SetOsname(*s) + } + return muo +} + +// ClearOsname clears the value of the "osname" field. +func (muo *MachineUpdateOne) ClearOsname() *MachineUpdateOne { + muo.mutation.ClearOsname() + return muo +} + +// SetOsversion sets the "osversion" field. +func (muo *MachineUpdateOne) SetOsversion(s string) *MachineUpdateOne { + muo.mutation.SetOsversion(s) + return muo +} + +// SetNillableOsversion sets the "osversion" field if the given value is not nil. 
+func (muo *MachineUpdateOne) SetNillableOsversion(s *string) *MachineUpdateOne { + if s != nil { + muo.SetOsversion(*s) + } + return muo +} + +// ClearOsversion clears the value of the "osversion" field. +func (muo *MachineUpdateOne) ClearOsversion() *MachineUpdateOne { + muo.mutation.ClearOsversion() + return muo +} + +// SetFeatureflags sets the "featureflags" field. +func (muo *MachineUpdateOne) SetFeatureflags(s string) *MachineUpdateOne { + muo.mutation.SetFeatureflags(s) + return muo +} + +// SetNillableFeatureflags sets the "featureflags" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableFeatureflags(s *string) *MachineUpdateOne { + if s != nil { + muo.SetFeatureflags(*s) + } + return muo +} + +// ClearFeatureflags clears the value of the "featureflags" field. +func (muo *MachineUpdateOne) ClearFeatureflags() *MachineUpdateOne { + muo.mutation.ClearFeatureflags() + return muo +} + +// SetHubstate sets the "hubstate" field. +func (muo *MachineUpdateOne) SetHubstate(mi *models.HubItems) *MachineUpdateOne { + muo.mutation.SetHubstate(mi) + return muo +} + +// ClearHubstate clears the value of the "hubstate" field. +func (muo *MachineUpdateOne) ClearHubstate() *MachineUpdateOne { + muo.mutation.ClearHubstate() + return muo +} + // AddAlertIDs adds the "alerts" edge to the Alert entity by IDs. func (muo *MachineUpdateOne) AddAlertIDs(ids ...int) *MachineUpdateOne { muo.mutation.AddAlertIDs(ids...) @@ -702,11 +791,7 @@ func (muo *MachineUpdateOne) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. 
func (muo *MachineUpdateOne) defaults() { - if _, ok := muo.mutation.CreatedAt(); !ok && !muo.mutation.CreatedAtCleared() { - v := machine.UpdateDefaultCreatedAt() - muo.mutation.SetCreatedAt(v) - } - if _, ok := muo.mutation.UpdatedAt(); !ok && !muo.mutation.UpdatedAtCleared() { + if _, ok := muo.mutation.UpdatedAt(); !ok { v := machine.UpdateDefaultUpdatedAt() muo.mutation.SetUpdatedAt(v) } @@ -759,18 +844,9 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e } } } - if value, ok := muo.mutation.CreatedAt(); ok { - _spec.SetField(machine.FieldCreatedAt, field.TypeTime, value) - } - if muo.mutation.CreatedAtCleared() { - _spec.ClearField(machine.FieldCreatedAt, field.TypeTime) - } if value, ok := muo.mutation.UpdatedAt(); ok { _spec.SetField(machine.FieldUpdatedAt, field.TypeTime, value) } - if muo.mutation.UpdatedAtCleared() { - _spec.ClearField(machine.FieldUpdatedAt, field.TypeTime) - } if value, ok := muo.mutation.LastPush(); ok { _spec.SetField(machine.FieldLastPush, field.TypeTime, value) } @@ -783,9 +859,6 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e if muo.mutation.LastHeartbeatCleared() { _spec.ClearField(machine.FieldLastHeartbeat, field.TypeTime) } - if value, ok := muo.mutation.MachineId(); ok { - _spec.SetField(machine.FieldMachineId, field.TypeString, value) - } if value, ok := muo.mutation.Password(); ok { _spec.SetField(machine.FieldPassword, field.TypeString, value) } @@ -816,6 +889,30 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e if value, ok := muo.mutation.AuthType(); ok { _spec.SetField(machine.FieldAuthType, field.TypeString, value) } + if value, ok := muo.mutation.Osname(); ok { + _spec.SetField(machine.FieldOsname, field.TypeString, value) + } + if muo.mutation.OsnameCleared() { + _spec.ClearField(machine.FieldOsname, field.TypeString) + } + if value, ok := muo.mutation.Osversion(); ok { + _spec.SetField(machine.FieldOsversion, 
field.TypeString, value) + } + if muo.mutation.OsversionCleared() { + _spec.ClearField(machine.FieldOsversion, field.TypeString) + } + if value, ok := muo.mutation.Featureflags(); ok { + _spec.SetField(machine.FieldFeatureflags, field.TypeString, value) + } + if muo.mutation.FeatureflagsCleared() { + _spec.ClearField(machine.FieldFeatureflags, field.TypeString) + } + if value, ok := muo.mutation.Hubstate(); ok { + _spec.SetField(machine.FieldHubstate, field.TypeJSON, value) + } + if muo.mutation.HubstateCleared() { + _spec.ClearField(machine.FieldHubstate, field.TypeJSON) + } if muo.mutation.AlertsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, diff --git a/pkg/database/ent/meta.go b/pkg/database/ent/meta.go index cadc210937e..768358ca2bf 100644 --- a/pkg/database/ent/meta.go +++ b/pkg/database/ent/meta.go @@ -19,9 +19,9 @@ type Meta struct { // ID of the ent. ID int `json:"id,omitempty"` // CreatedAt holds the value of the "created_at" field. - CreatedAt *time.Time `json:"created_at,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt *time.Time `json:"updated_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` // Key holds the value of the "key" field. Key string `json:"key,omitempty"` // Value holds the value of the "value" field. 
@@ -92,15 +92,13 @@ func (m *Meta) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - m.CreatedAt = new(time.Time) - *m.CreatedAt = value.Time + m.CreatedAt = value.Time } case meta.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - m.UpdatedAt = new(time.Time) - *m.UpdatedAt = value.Time + m.UpdatedAt = value.Time } case meta.FieldKey: if value, ok := values[i].(*sql.NullString); !ok { @@ -161,15 +159,11 @@ func (m *Meta) String() string { var builder strings.Builder builder.WriteString("Meta(") builder.WriteString(fmt.Sprintf("id=%v, ", m.ID)) - if v := m.CreatedAt; v != nil { - builder.WriteString("created_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("created_at=") + builder.WriteString(m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := m.UpdatedAt; v != nil { - builder.WriteString("updated_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("updated_at=") + builder.WriteString(m.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("key=") builder.WriteString(m.Key) diff --git a/pkg/database/ent/meta/meta.go b/pkg/database/ent/meta/meta.go index 583496fb710..ff41361616a 100644 --- a/pkg/database/ent/meta/meta.go +++ b/pkg/database/ent/meta/meta.go @@ -60,8 +60,6 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time - // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. - UpdateDefaultCreatedAt func() time.Time // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. 
DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. diff --git a/pkg/database/ent/meta/where.go b/pkg/database/ent/meta/where.go index 7fc99136972..6d5d54c0482 100644 --- a/pkg/database/ent/meta/where.go +++ b/pkg/database/ent/meta/where.go @@ -120,16 +120,6 @@ func CreatedAtLTE(v time.Time) predicate.Meta { return predicate.Meta(sql.FieldLTE(FieldCreatedAt, v)) } -// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. -func CreatedAtIsNil() predicate.Meta { - return predicate.Meta(sql.FieldIsNull(FieldCreatedAt)) -} - -// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. -func CreatedAtNotNil() predicate.Meta { - return predicate.Meta(sql.FieldNotNull(FieldCreatedAt)) -} - // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Meta { return predicate.Meta(sql.FieldEQ(FieldUpdatedAt, v)) @@ -170,16 +160,6 @@ func UpdatedAtLTE(v time.Time) predicate.Meta { return predicate.Meta(sql.FieldLTE(FieldUpdatedAt, v)) } -// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. -func UpdatedAtIsNil() predicate.Meta { - return predicate.Meta(sql.FieldIsNull(FieldUpdatedAt)) -} - -// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. -func UpdatedAtNotNil() predicate.Meta { - return predicate.Meta(sql.FieldNotNull(FieldUpdatedAt)) -} - // KeyEQ applies the EQ predicate on the "key" field. func KeyEQ(v string) predicate.Meta { return predicate.Meta(sql.FieldEQ(FieldKey, v)) diff --git a/pkg/database/ent/meta_create.go b/pkg/database/ent/meta_create.go index 3bf30f0def9..321c4bd7ab4 100644 --- a/pkg/database/ent/meta_create.go +++ b/pkg/database/ent/meta_create.go @@ -141,6 +141,12 @@ func (mc *MetaCreate) defaults() { // check runs all checks and user-defined validators on the builder. 
func (mc *MetaCreate) check() error { + if _, ok := mc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Meta.created_at"`)} + } + if _, ok := mc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Meta.updated_at"`)} + } if _, ok := mc.mutation.Key(); !ok { return &ValidationError{Name: "key", err: errors.New(`ent: missing required field "Meta.key"`)} } @@ -180,11 +186,11 @@ func (mc *MetaCreate) createSpec() (*Meta, *sqlgraph.CreateSpec) { ) if value, ok := mc.mutation.CreatedAt(); ok { _spec.SetField(meta.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = &value + _node.CreatedAt = value } if value, ok := mc.mutation.UpdatedAt(); ok { _spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = &value + _node.UpdatedAt = value } if value, ok := mc.mutation.Key(); ok { _spec.SetField(meta.FieldKey, field.TypeString, value) diff --git a/pkg/database/ent/meta_update.go b/pkg/database/ent/meta_update.go index a1379faa130..76567c5eff7 100644 --- a/pkg/database/ent/meta_update.go +++ b/pkg/database/ent/meta_update.go @@ -35,9 +35,11 @@ func (mu *MetaUpdate) SetCreatedAt(t time.Time) *MetaUpdate { return mu } -// ClearCreatedAt clears the value of the "created_at" field. -func (mu *MetaUpdate) ClearCreatedAt() *MetaUpdate { - mu.mutation.ClearCreatedAt() +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (mu *MetaUpdate) SetNillableCreatedAt(t *time.Time) *MetaUpdate { + if t != nil { + mu.SetCreatedAt(*t) + } return mu } @@ -47,12 +49,6 @@ func (mu *MetaUpdate) SetUpdatedAt(t time.Time) *MetaUpdate { return mu } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (mu *MetaUpdate) ClearUpdatedAt() *MetaUpdate { - mu.mutation.ClearUpdatedAt() - return mu -} - // SetKey sets the "key" field. 
func (mu *MetaUpdate) SetKey(s string) *MetaUpdate { mu.mutation.SetKey(s) @@ -161,11 +157,7 @@ func (mu *MetaUpdate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (mu *MetaUpdate) defaults() { - if _, ok := mu.mutation.CreatedAt(); !ok && !mu.mutation.CreatedAtCleared() { - v := meta.UpdateDefaultCreatedAt() - mu.mutation.SetCreatedAt(v) - } - if _, ok := mu.mutation.UpdatedAt(); !ok && !mu.mutation.UpdatedAtCleared() { + if _, ok := mu.mutation.UpdatedAt(); !ok { v := meta.UpdateDefaultUpdatedAt() mu.mutation.SetUpdatedAt(v) } @@ -196,15 +188,9 @@ func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := mu.mutation.CreatedAt(); ok { _spec.SetField(meta.FieldCreatedAt, field.TypeTime, value) } - if mu.mutation.CreatedAtCleared() { - _spec.ClearField(meta.FieldCreatedAt, field.TypeTime) - } if value, ok := mu.mutation.UpdatedAt(); ok { _spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value) } - if mu.mutation.UpdatedAtCleared() { - _spec.ClearField(meta.FieldUpdatedAt, field.TypeTime) - } if value, ok := mu.mutation.Key(); ok { _spec.SetField(meta.FieldKey, field.TypeString, value) } @@ -266,9 +252,11 @@ func (muo *MetaUpdateOne) SetCreatedAt(t time.Time) *MetaUpdateOne { return muo } -// ClearCreatedAt clears the value of the "created_at" field. -func (muo *MetaUpdateOne) ClearCreatedAt() *MetaUpdateOne { - muo.mutation.ClearCreatedAt() +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (muo *MetaUpdateOne) SetNillableCreatedAt(t *time.Time) *MetaUpdateOne { + if t != nil { + muo.SetCreatedAt(*t) + } return muo } @@ -278,12 +266,6 @@ func (muo *MetaUpdateOne) SetUpdatedAt(t time.Time) *MetaUpdateOne { return muo } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (muo *MetaUpdateOne) ClearUpdatedAt() *MetaUpdateOne { - muo.mutation.ClearUpdatedAt() - return muo -} - // SetKey sets the "key" field. 
func (muo *MetaUpdateOne) SetKey(s string) *MetaUpdateOne { muo.mutation.SetKey(s) @@ -405,11 +387,7 @@ func (muo *MetaUpdateOne) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (muo *MetaUpdateOne) defaults() { - if _, ok := muo.mutation.CreatedAt(); !ok && !muo.mutation.CreatedAtCleared() { - v := meta.UpdateDefaultCreatedAt() - muo.mutation.SetCreatedAt(v) - } - if _, ok := muo.mutation.UpdatedAt(); !ok && !muo.mutation.UpdatedAtCleared() { + if _, ok := muo.mutation.UpdatedAt(); !ok { v := meta.UpdateDefaultUpdatedAt() muo.mutation.SetUpdatedAt(v) } @@ -457,15 +435,9 @@ func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error) if value, ok := muo.mutation.CreatedAt(); ok { _spec.SetField(meta.FieldCreatedAt, field.TypeTime, value) } - if muo.mutation.CreatedAtCleared() { - _spec.ClearField(meta.FieldCreatedAt, field.TypeTime) - } if value, ok := muo.mutation.UpdatedAt(); ok { _spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value) } - if muo.mutation.UpdatedAtCleared() { - _spec.ClearField(meta.FieldUpdatedAt, field.TypeTime) - } if value, ok := muo.mutation.Key(); ok { _spec.SetField(meta.FieldKey, field.TypeString, value) } diff --git a/pkg/database/ent/metric.go b/pkg/database/ent/metric.go new file mode 100644 index 00000000000..236d54da25d --- /dev/null +++ b/pkg/database/ent/metric.go @@ -0,0 +1,154 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" +) + +// Metric is the model entity for the Metric schema. +type Metric struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // Type of the metrics source: LP=logprocessor, RC=remediation + GeneratedType metric.GeneratedType `json:"generated_type,omitempty"` + // Source of the metrics: machine id, bouncer name... 
+ // It must come from the auth middleware. + GeneratedBy string `json:"generated_by,omitempty"` + // When the metrics are collected/calculated at the source + CollectedAt time.Time `json:"collected_at,omitempty"` + // When the metrics are sent to the console + PushedAt *time.Time `json:"pushed_at,omitempty"` + // The actual metrics (item0) + Payload string `json:"payload,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Metric) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case metric.FieldID: + values[i] = new(sql.NullInt64) + case metric.FieldGeneratedType, metric.FieldGeneratedBy, metric.FieldPayload: + values[i] = new(sql.NullString) + case metric.FieldCollectedAt, metric.FieldPushedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Metric fields. 
+func (m *Metric) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case metric.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + m.ID = int(value.Int64) + case metric.FieldGeneratedType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field generated_type", values[i]) + } else if value.Valid { + m.GeneratedType = metric.GeneratedType(value.String) + } + case metric.FieldGeneratedBy: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field generated_by", values[i]) + } else if value.Valid { + m.GeneratedBy = value.String + } + case metric.FieldCollectedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field collected_at", values[i]) + } else if value.Valid { + m.CollectedAt = value.Time + } + case metric.FieldPushedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field pushed_at", values[i]) + } else if value.Valid { + m.PushedAt = new(time.Time) + *m.PushedAt = value.Time + } + case metric.FieldPayload: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field payload", values[i]) + } else if value.Valid { + m.Payload = value.String + } + default: + m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Metric. +// This includes values selected through modifiers, order, etc. +func (m *Metric) Value(name string) (ent.Value, error) { + return m.selectValues.Get(name) +} + +// Update returns a builder for updating this Metric. 
+// Note that you need to call Metric.Unwrap() before calling this method if this Metric +// was returned from a transaction, and the transaction was committed or rolled back. +func (m *Metric) Update() *MetricUpdateOne { + return NewMetricClient(m.config).UpdateOne(m) +} + +// Unwrap unwraps the Metric entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (m *Metric) Unwrap() *Metric { + _tx, ok := m.config.driver.(*txDriver) + if !ok { + panic("ent: Metric is not a transactional entity") + } + m.config.driver = _tx.drv + return m +} + +// String implements the fmt.Stringer. +func (m *Metric) String() string { + var builder strings.Builder + builder.WriteString("Metric(") + builder.WriteString(fmt.Sprintf("id=%v, ", m.ID)) + builder.WriteString("generated_type=") + builder.WriteString(fmt.Sprintf("%v", m.GeneratedType)) + builder.WriteString(", ") + builder.WriteString("generated_by=") + builder.WriteString(m.GeneratedBy) + builder.WriteString(", ") + builder.WriteString("collected_at=") + builder.WriteString(m.CollectedAt.Format(time.ANSIC)) + builder.WriteString(", ") + if v := m.PushedAt; v != nil { + builder.WriteString("pushed_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("payload=") + builder.WriteString(m.Payload) + builder.WriteByte(')') + return builder.String() +} + +// Metrics is a parsable slice of Metric. +type Metrics []*Metric diff --git a/pkg/database/ent/metric/metric.go b/pkg/database/ent/metric/metric.go new file mode 100644 index 00000000000..879f1006d64 --- /dev/null +++ b/pkg/database/ent/metric/metric.go @@ -0,0 +1,104 @@ +// Code generated by ent, DO NOT EDIT. + +package metric + +import ( + "fmt" + + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the metric type in the database. 
+ Label = "metric" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldGeneratedType holds the string denoting the generated_type field in the database. + FieldGeneratedType = "generated_type" + // FieldGeneratedBy holds the string denoting the generated_by field in the database. + FieldGeneratedBy = "generated_by" + // FieldCollectedAt holds the string denoting the collected_at field in the database. + FieldCollectedAt = "collected_at" + // FieldPushedAt holds the string denoting the pushed_at field in the database. + FieldPushedAt = "pushed_at" + // FieldPayload holds the string denoting the payload field in the database. + FieldPayload = "payload" + // Table holds the table name of the metric in the database. + Table = "metrics" +) + +// Columns holds all SQL columns for metric fields. +var Columns = []string{ + FieldID, + FieldGeneratedType, + FieldGeneratedBy, + FieldCollectedAt, + FieldPushedAt, + FieldPayload, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +// GeneratedType defines the type for the "generated_type" enum field. +type GeneratedType string + +// GeneratedType values. +const ( + GeneratedTypeLP GeneratedType = "LP" + GeneratedTypeRC GeneratedType = "RC" +) + +func (gt GeneratedType) String() string { + return string(gt) +} + +// GeneratedTypeValidator is a validator for the "generated_type" field enum values. It is called by the builders before save. +func GeneratedTypeValidator(gt GeneratedType) error { + switch gt { + case GeneratedTypeLP, GeneratedTypeRC: + return nil + default: + return fmt.Errorf("metric: invalid enum value for generated_type field: %q", gt) + } +} + +// OrderOption defines the ordering options for the Metric queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. 
+func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByGeneratedType orders the results by the generated_type field. +func ByGeneratedType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGeneratedType, opts...).ToFunc() +} + +// ByGeneratedBy orders the results by the generated_by field. +func ByGeneratedBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGeneratedBy, opts...).ToFunc() +} + +// ByCollectedAt orders the results by the collected_at field. +func ByCollectedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCollectedAt, opts...).ToFunc() +} + +// ByPushedAt orders the results by the pushed_at field. +func ByPushedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPushedAt, opts...).ToFunc() +} + +// ByPayload orders the results by the payload field. +func ByPayload(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPayload, opts...).ToFunc() +} diff --git a/pkg/database/ent/metric/where.go b/pkg/database/ent/metric/where.go new file mode 100644 index 00000000000..e49f80f3411 --- /dev/null +++ b/pkg/database/ent/metric/where.go @@ -0,0 +1,330 @@ +// Code generated by ent, DO NOT EDIT. + +package metric + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Metric { + return predicate.Metric(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. 
+func IDIn(ids ...int) predicate.Metric { + return predicate.Metric(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.Metric { + return predicate.Metric(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Metric { + return predicate.Metric(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.Metric { + return predicate.Metric(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Metric { + return predicate.Metric(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Metric { + return predicate.Metric(sql.FieldLTE(FieldID, id)) +} + +// GeneratedBy applies equality check predicate on the "generated_by" field. It's identical to GeneratedByEQ. +func GeneratedBy(v string) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldGeneratedBy, v)) +} + +// CollectedAt applies equality check predicate on the "collected_at" field. It's identical to CollectedAtEQ. +func CollectedAt(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldCollectedAt, v)) +} + +// PushedAt applies equality check predicate on the "pushed_at" field. It's identical to PushedAtEQ. +func PushedAt(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldPushedAt, v)) +} + +// Payload applies equality check predicate on the "payload" field. It's identical to PayloadEQ. +func Payload(v string) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldPayload, v)) +} + +// GeneratedTypeEQ applies the EQ predicate on the "generated_type" field. 
+func GeneratedTypeEQ(v GeneratedType) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldGeneratedType, v)) +} + +// GeneratedTypeNEQ applies the NEQ predicate on the "generated_type" field. +func GeneratedTypeNEQ(v GeneratedType) predicate.Metric { + return predicate.Metric(sql.FieldNEQ(FieldGeneratedType, v)) +} + +// GeneratedTypeIn applies the In predicate on the "generated_type" field. +func GeneratedTypeIn(vs ...GeneratedType) predicate.Metric { + return predicate.Metric(sql.FieldIn(FieldGeneratedType, vs...)) +} + +// GeneratedTypeNotIn applies the NotIn predicate on the "generated_type" field. +func GeneratedTypeNotIn(vs ...GeneratedType) predicate.Metric { + return predicate.Metric(sql.FieldNotIn(FieldGeneratedType, vs...)) +} + +// GeneratedByEQ applies the EQ predicate on the "generated_by" field. +func GeneratedByEQ(v string) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldGeneratedBy, v)) +} + +// GeneratedByNEQ applies the NEQ predicate on the "generated_by" field. +func GeneratedByNEQ(v string) predicate.Metric { + return predicate.Metric(sql.FieldNEQ(FieldGeneratedBy, v)) +} + +// GeneratedByIn applies the In predicate on the "generated_by" field. +func GeneratedByIn(vs ...string) predicate.Metric { + return predicate.Metric(sql.FieldIn(FieldGeneratedBy, vs...)) +} + +// GeneratedByNotIn applies the NotIn predicate on the "generated_by" field. +func GeneratedByNotIn(vs ...string) predicate.Metric { + return predicate.Metric(sql.FieldNotIn(FieldGeneratedBy, vs...)) +} + +// GeneratedByGT applies the GT predicate on the "generated_by" field. +func GeneratedByGT(v string) predicate.Metric { + return predicate.Metric(sql.FieldGT(FieldGeneratedBy, v)) +} + +// GeneratedByGTE applies the GTE predicate on the "generated_by" field. +func GeneratedByGTE(v string) predicate.Metric { + return predicate.Metric(sql.FieldGTE(FieldGeneratedBy, v)) +} + +// GeneratedByLT applies the LT predicate on the "generated_by" field. 
+func GeneratedByLT(v string) predicate.Metric { + return predicate.Metric(sql.FieldLT(FieldGeneratedBy, v)) +} + +// GeneratedByLTE applies the LTE predicate on the "generated_by" field. +func GeneratedByLTE(v string) predicate.Metric { + return predicate.Metric(sql.FieldLTE(FieldGeneratedBy, v)) +} + +// GeneratedByContains applies the Contains predicate on the "generated_by" field. +func GeneratedByContains(v string) predicate.Metric { + return predicate.Metric(sql.FieldContains(FieldGeneratedBy, v)) +} + +// GeneratedByHasPrefix applies the HasPrefix predicate on the "generated_by" field. +func GeneratedByHasPrefix(v string) predicate.Metric { + return predicate.Metric(sql.FieldHasPrefix(FieldGeneratedBy, v)) +} + +// GeneratedByHasSuffix applies the HasSuffix predicate on the "generated_by" field. +func GeneratedByHasSuffix(v string) predicate.Metric { + return predicate.Metric(sql.FieldHasSuffix(FieldGeneratedBy, v)) +} + +// GeneratedByEqualFold applies the EqualFold predicate on the "generated_by" field. +func GeneratedByEqualFold(v string) predicate.Metric { + return predicate.Metric(sql.FieldEqualFold(FieldGeneratedBy, v)) +} + +// GeneratedByContainsFold applies the ContainsFold predicate on the "generated_by" field. +func GeneratedByContainsFold(v string) predicate.Metric { + return predicate.Metric(sql.FieldContainsFold(FieldGeneratedBy, v)) +} + +// CollectedAtEQ applies the EQ predicate on the "collected_at" field. +func CollectedAtEQ(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldCollectedAt, v)) +} + +// CollectedAtNEQ applies the NEQ predicate on the "collected_at" field. +func CollectedAtNEQ(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldNEQ(FieldCollectedAt, v)) +} + +// CollectedAtIn applies the In predicate on the "collected_at" field. 
+func CollectedAtIn(vs ...time.Time) predicate.Metric { + return predicate.Metric(sql.FieldIn(FieldCollectedAt, vs...)) +} + +// CollectedAtNotIn applies the NotIn predicate on the "collected_at" field. +func CollectedAtNotIn(vs ...time.Time) predicate.Metric { + return predicate.Metric(sql.FieldNotIn(FieldCollectedAt, vs...)) +} + +// CollectedAtGT applies the GT predicate on the "collected_at" field. +func CollectedAtGT(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldGT(FieldCollectedAt, v)) +} + +// CollectedAtGTE applies the GTE predicate on the "collected_at" field. +func CollectedAtGTE(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldGTE(FieldCollectedAt, v)) +} + +// CollectedAtLT applies the LT predicate on the "collected_at" field. +func CollectedAtLT(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldLT(FieldCollectedAt, v)) +} + +// CollectedAtLTE applies the LTE predicate on the "collected_at" field. +func CollectedAtLTE(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldLTE(FieldCollectedAt, v)) +} + +// PushedAtEQ applies the EQ predicate on the "pushed_at" field. +func PushedAtEQ(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldPushedAt, v)) +} + +// PushedAtNEQ applies the NEQ predicate on the "pushed_at" field. +func PushedAtNEQ(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldNEQ(FieldPushedAt, v)) +} + +// PushedAtIn applies the In predicate on the "pushed_at" field. +func PushedAtIn(vs ...time.Time) predicate.Metric { + return predicate.Metric(sql.FieldIn(FieldPushedAt, vs...)) +} + +// PushedAtNotIn applies the NotIn predicate on the "pushed_at" field. +func PushedAtNotIn(vs ...time.Time) predicate.Metric { + return predicate.Metric(sql.FieldNotIn(FieldPushedAt, vs...)) +} + +// PushedAtGT applies the GT predicate on the "pushed_at" field. 
+func PushedAtGT(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldGT(FieldPushedAt, v)) +} + +// PushedAtGTE applies the GTE predicate on the "pushed_at" field. +func PushedAtGTE(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldGTE(FieldPushedAt, v)) +} + +// PushedAtLT applies the LT predicate on the "pushed_at" field. +func PushedAtLT(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldLT(FieldPushedAt, v)) +} + +// PushedAtLTE applies the LTE predicate on the "pushed_at" field. +func PushedAtLTE(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldLTE(FieldPushedAt, v)) +} + +// PushedAtIsNil applies the IsNil predicate on the "pushed_at" field. +func PushedAtIsNil() predicate.Metric { + return predicate.Metric(sql.FieldIsNull(FieldPushedAt)) +} + +// PushedAtNotNil applies the NotNil predicate on the "pushed_at" field. +func PushedAtNotNil() predicate.Metric { + return predicate.Metric(sql.FieldNotNull(FieldPushedAt)) +} + +// PayloadEQ applies the EQ predicate on the "payload" field. +func PayloadEQ(v string) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldPayload, v)) +} + +// PayloadNEQ applies the NEQ predicate on the "payload" field. +func PayloadNEQ(v string) predicate.Metric { + return predicate.Metric(sql.FieldNEQ(FieldPayload, v)) +} + +// PayloadIn applies the In predicate on the "payload" field. +func PayloadIn(vs ...string) predicate.Metric { + return predicate.Metric(sql.FieldIn(FieldPayload, vs...)) +} + +// PayloadNotIn applies the NotIn predicate on the "payload" field. +func PayloadNotIn(vs ...string) predicate.Metric { + return predicate.Metric(sql.FieldNotIn(FieldPayload, vs...)) +} + +// PayloadGT applies the GT predicate on the "payload" field. +func PayloadGT(v string) predicate.Metric { + return predicate.Metric(sql.FieldGT(FieldPayload, v)) +} + +// PayloadGTE applies the GTE predicate on the "payload" field. 
+func PayloadGTE(v string) predicate.Metric { + return predicate.Metric(sql.FieldGTE(FieldPayload, v)) +} + +// PayloadLT applies the LT predicate on the "payload" field. +func PayloadLT(v string) predicate.Metric { + return predicate.Metric(sql.FieldLT(FieldPayload, v)) +} + +// PayloadLTE applies the LTE predicate on the "payload" field. +func PayloadLTE(v string) predicate.Metric { + return predicate.Metric(sql.FieldLTE(FieldPayload, v)) +} + +// PayloadContains applies the Contains predicate on the "payload" field. +func PayloadContains(v string) predicate.Metric { + return predicate.Metric(sql.FieldContains(FieldPayload, v)) +} + +// PayloadHasPrefix applies the HasPrefix predicate on the "payload" field. +func PayloadHasPrefix(v string) predicate.Metric { + return predicate.Metric(sql.FieldHasPrefix(FieldPayload, v)) +} + +// PayloadHasSuffix applies the HasSuffix predicate on the "payload" field. +func PayloadHasSuffix(v string) predicate.Metric { + return predicate.Metric(sql.FieldHasSuffix(FieldPayload, v)) +} + +// PayloadEqualFold applies the EqualFold predicate on the "payload" field. +func PayloadEqualFold(v string) predicate.Metric { + return predicate.Metric(sql.FieldEqualFold(FieldPayload, v)) +} + +// PayloadContainsFold applies the ContainsFold predicate on the "payload" field. +func PayloadContainsFold(v string) predicate.Metric { + return predicate.Metric(sql.FieldContainsFold(FieldPayload, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Metric) predicate.Metric { + return predicate.Metric(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Metric) predicate.Metric { + return predicate.Metric(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. 
+func Not(p predicate.Metric) predicate.Metric { + return predicate.Metric(sql.NotPredicates(p)) +} diff --git a/pkg/database/ent/metric_create.go b/pkg/database/ent/metric_create.go new file mode 100644 index 00000000000..8fa656db427 --- /dev/null +++ b/pkg/database/ent/metric_create.go @@ -0,0 +1,246 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" +) + +// MetricCreate is the builder for creating a Metric entity. +type MetricCreate struct { + config + mutation *MetricMutation + hooks []Hook +} + +// SetGeneratedType sets the "generated_type" field. +func (mc *MetricCreate) SetGeneratedType(mt metric.GeneratedType) *MetricCreate { + mc.mutation.SetGeneratedType(mt) + return mc +} + +// SetGeneratedBy sets the "generated_by" field. +func (mc *MetricCreate) SetGeneratedBy(s string) *MetricCreate { + mc.mutation.SetGeneratedBy(s) + return mc +} + +// SetCollectedAt sets the "collected_at" field. +func (mc *MetricCreate) SetCollectedAt(t time.Time) *MetricCreate { + mc.mutation.SetCollectedAt(t) + return mc +} + +// SetPushedAt sets the "pushed_at" field. +func (mc *MetricCreate) SetPushedAt(t time.Time) *MetricCreate { + mc.mutation.SetPushedAt(t) + return mc +} + +// SetNillablePushedAt sets the "pushed_at" field if the given value is not nil. +func (mc *MetricCreate) SetNillablePushedAt(t *time.Time) *MetricCreate { + if t != nil { + mc.SetPushedAt(*t) + } + return mc +} + +// SetPayload sets the "payload" field. +func (mc *MetricCreate) SetPayload(s string) *MetricCreate { + mc.mutation.SetPayload(s) + return mc +} + +// Mutation returns the MetricMutation object of the builder. +func (mc *MetricCreate) Mutation() *MetricMutation { + return mc.mutation +} + +// Save creates the Metric in the database. 
+func (mc *MetricCreate) Save(ctx context.Context) (*Metric, error) { + return withHooks(ctx, mc.sqlSave, mc.mutation, mc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (mc *MetricCreate) SaveX(ctx context.Context) *Metric { + v, err := mc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (mc *MetricCreate) Exec(ctx context.Context) error { + _, err := mc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (mc *MetricCreate) ExecX(ctx context.Context) { + if err := mc.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (mc *MetricCreate) check() error { + if _, ok := mc.mutation.GeneratedType(); !ok { + return &ValidationError{Name: "generated_type", err: errors.New(`ent: missing required field "Metric.generated_type"`)} + } + if v, ok := mc.mutation.GeneratedType(); ok { + if err := metric.GeneratedTypeValidator(v); err != nil { + return &ValidationError{Name: "generated_type", err: fmt.Errorf(`ent: validator failed for field "Metric.generated_type": %w`, err)} + } + } + if _, ok := mc.mutation.GeneratedBy(); !ok { + return &ValidationError{Name: "generated_by", err: errors.New(`ent: missing required field "Metric.generated_by"`)} + } + if _, ok := mc.mutation.CollectedAt(); !ok { + return &ValidationError{Name: "collected_at", err: errors.New(`ent: missing required field "Metric.collected_at"`)} + } + if _, ok := mc.mutation.Payload(); !ok { + return &ValidationError{Name: "payload", err: errors.New(`ent: missing required field "Metric.payload"`)} + } + return nil +} + +func (mc *MetricCreate) sqlSave(ctx context.Context) (*Metric, error) { + if err := mc.check(); err != nil { + return nil, err + } + _node, _spec := mc.createSpec() + if err := sqlgraph.CreateNode(ctx, mc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), 
wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + mc.mutation.id = &_node.ID + mc.mutation.done = true + return _node, nil +} + +func (mc *MetricCreate) createSpec() (*Metric, *sqlgraph.CreateSpec) { + var ( + _node = &Metric{config: mc.config} + _spec = sqlgraph.NewCreateSpec(metric.Table, sqlgraph.NewFieldSpec(metric.FieldID, field.TypeInt)) + ) + if value, ok := mc.mutation.GeneratedType(); ok { + _spec.SetField(metric.FieldGeneratedType, field.TypeEnum, value) + _node.GeneratedType = value + } + if value, ok := mc.mutation.GeneratedBy(); ok { + _spec.SetField(metric.FieldGeneratedBy, field.TypeString, value) + _node.GeneratedBy = value + } + if value, ok := mc.mutation.CollectedAt(); ok { + _spec.SetField(metric.FieldCollectedAt, field.TypeTime, value) + _node.CollectedAt = value + } + if value, ok := mc.mutation.PushedAt(); ok { + _spec.SetField(metric.FieldPushedAt, field.TypeTime, value) + _node.PushedAt = &value + } + if value, ok := mc.mutation.Payload(); ok { + _spec.SetField(metric.FieldPayload, field.TypeString, value) + _node.Payload = value + } + return _node, _spec +} + +// MetricCreateBulk is the builder for creating many Metric entities in bulk. +type MetricCreateBulk struct { + config + err error + builders []*MetricCreate +} + +// Save creates the Metric entities in the database. 
+func (mcb *MetricCreateBulk) Save(ctx context.Context) ([]*Metric, error) { + if mcb.err != nil { + return nil, mcb.err + } + specs := make([]*sqlgraph.CreateSpec, len(mcb.builders)) + nodes := make([]*Metric, len(mcb.builders)) + mutators := make([]Mutator, len(mcb.builders)) + for i := range mcb.builders { + func(i int, root context.Context) { + builder := mcb.builders[i] + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MetricMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, mcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, mcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, mcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (mcb *MetricCreateBulk) SaveX(ctx context.Context) []*Metric { + v, err := mcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (mcb *MetricCreateBulk) Exec(ctx context.Context) error { + _, err := mcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (mcb *MetricCreateBulk) ExecX(ctx context.Context) { + if err := mcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/pkg/database/ent/metric_delete.go b/pkg/database/ent/metric_delete.go new file mode 100644 index 00000000000..d6606680a6a --- /dev/null +++ b/pkg/database/ent/metric_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// MetricDelete is the builder for deleting a Metric entity. +type MetricDelete struct { + config + hooks []Hook + mutation *MetricMutation +} + +// Where appends a list predicates to the MetricDelete builder. +func (md *MetricDelete) Where(ps ...predicate.Metric) *MetricDelete { + md.mutation.Where(ps...) + return md +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (md *MetricDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, md.sqlExec, md.mutation, md.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (md *MetricDelete) ExecX(ctx context.Context) int { + n, err := md.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (md *MetricDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(metric.Table, sqlgraph.NewFieldSpec(metric.FieldID, field.TypeInt)) + if ps := md.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, md.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + md.mutation.done = true + return affected, err +} + +// MetricDeleteOne is the builder for deleting a single Metric entity. +type MetricDeleteOne struct { + md *MetricDelete +} + +// Where appends a list predicates to the MetricDelete builder. +func (mdo *MetricDeleteOne) Where(ps ...predicate.Metric) *MetricDeleteOne { + mdo.md.mutation.Where(ps...) + return mdo +} + +// Exec executes the deletion query. +func (mdo *MetricDeleteOne) Exec(ctx context.Context) error { + n, err := mdo.md.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{metric.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (mdo *MetricDeleteOne) ExecX(ctx context.Context) { + if err := mdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/pkg/database/ent/metric_query.go b/pkg/database/ent/metric_query.go new file mode 100644 index 00000000000..6e1c6f08b4a --- /dev/null +++ b/pkg/database/ent/metric_query.go @@ -0,0 +1,526 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// MetricQuery is the builder for querying Metric entities. +type MetricQuery struct { + config + ctx *QueryContext + order []metric.OrderOption + inters []Interceptor + predicates []predicate.Metric + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the MetricQuery builder. +func (mq *MetricQuery) Where(ps ...predicate.Metric) *MetricQuery { + mq.predicates = append(mq.predicates, ps...) + return mq +} + +// Limit the number of records to be returned by this query. +func (mq *MetricQuery) Limit(limit int) *MetricQuery { + mq.ctx.Limit = &limit + return mq +} + +// Offset to start from. +func (mq *MetricQuery) Offset(offset int) *MetricQuery { + mq.ctx.Offset = &offset + return mq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (mq *MetricQuery) Unique(unique bool) *MetricQuery { + mq.ctx.Unique = &unique + return mq +} + +// Order specifies how the records should be ordered. +func (mq *MetricQuery) Order(o ...metric.OrderOption) *MetricQuery { + mq.order = append(mq.order, o...) + return mq +} + +// First returns the first Metric entity from the query. +// Returns a *NotFoundError when no Metric was found. +func (mq *MetricQuery) First(ctx context.Context) (*Metric, error) { + nodes, err := mq.Limit(1).All(setContextOp(ctx, mq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{metric.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. 
+func (mq *MetricQuery) FirstX(ctx context.Context) *Metric { + node, err := mq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Metric ID from the query. +// Returns a *NotFoundError when no Metric ID was found. +func (mq *MetricQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = mq.Limit(1).IDs(setContextOp(ctx, mq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{metric.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (mq *MetricQuery) FirstIDX(ctx context.Context) int { + id, err := mq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Metric entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Metric entity is found. +// Returns a *NotFoundError when no Metric entities are found. +func (mq *MetricQuery) Only(ctx context.Context) (*Metric, error) { + nodes, err := mq.Limit(2).All(setContextOp(ctx, mq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{metric.Label} + default: + return nil, &NotSingularError{metric.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (mq *MetricQuery) OnlyX(ctx context.Context) *Metric { + node, err := mq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Metric ID in the query. +// Returns a *NotSingularError when more than one Metric ID is found. +// Returns a *NotFoundError when no entities are found. 
+func (mq *MetricQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = mq.Limit(2).IDs(setContextOp(ctx, mq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{metric.Label} + default: + err = &NotSingularError{metric.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (mq *MetricQuery) OnlyIDX(ctx context.Context) int { + id, err := mq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Metrics. +func (mq *MetricQuery) All(ctx context.Context) ([]*Metric, error) { + ctx = setContextOp(ctx, mq.ctx, "All") + if err := mq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Metric, *MetricQuery]() + return withInterceptors[[]*Metric](ctx, mq, qr, mq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (mq *MetricQuery) AllX(ctx context.Context) []*Metric { + nodes, err := mq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Metric IDs. +func (mq *MetricQuery) IDs(ctx context.Context) (ids []int, err error) { + if mq.ctx.Unique == nil && mq.path != nil { + mq.Unique(true) + } + ctx = setContextOp(ctx, mq.ctx, "IDs") + if err = mq.Select(metric.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (mq *MetricQuery) IDsX(ctx context.Context) []int { + ids, err := mq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. 
+func (mq *MetricQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, mq.ctx, "Count") + if err := mq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, mq, querierCount[*MetricQuery](), mq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (mq *MetricQuery) CountX(ctx context.Context) int { + count, err := mq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (mq *MetricQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, mq.ctx, "Exist") + switch _, err := mq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (mq *MetricQuery) ExistX(ctx context.Context) bool { + exist, err := mq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the MetricQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (mq *MetricQuery) Clone() *MetricQuery { + if mq == nil { + return nil + } + return &MetricQuery{ + config: mq.config, + ctx: mq.ctx.Clone(), + order: append([]metric.OrderOption{}, mq.order...), + inters: append([]Interceptor{}, mq.inters...), + predicates: append([]predicate.Metric{}, mq.predicates...), + // clone intermediate query. + sql: mq.sql.Clone(), + path: mq.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// GeneratedType metric.GeneratedType `json:"generated_type,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Metric.Query(). 
+// GroupBy(metric.FieldGeneratedType). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (mq *MetricQuery) GroupBy(field string, fields ...string) *MetricGroupBy { + mq.ctx.Fields = append([]string{field}, fields...) + grbuild := &MetricGroupBy{build: mq} + grbuild.flds = &mq.ctx.Fields + grbuild.label = metric.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// GeneratedType metric.GeneratedType `json:"generated_type,omitempty"` +// } +// +// client.Metric.Query(). +// Select(metric.FieldGeneratedType). +// Scan(ctx, &v) +func (mq *MetricQuery) Select(fields ...string) *MetricSelect { + mq.ctx.Fields = append(mq.ctx.Fields, fields...) + sbuild := &MetricSelect{MetricQuery: mq} + sbuild.label = metric.Label + sbuild.flds, sbuild.scan = &mq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a MetricSelect configured with the given aggregations. +func (mq *MetricQuery) Aggregate(fns ...AggregateFunc) *MetricSelect { + return mq.Select().Aggregate(fns...) 
+} + +func (mq *MetricQuery) prepareQuery(ctx context.Context) error { + for _, inter := range mq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, mq); err != nil { + return err + } + } + } + for _, f := range mq.ctx.Fields { + if !metric.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if mq.path != nil { + prev, err := mq.path(ctx) + if err != nil { + return err + } + mq.sql = prev + } + return nil +} + +func (mq *MetricQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Metric, error) { + var ( + nodes = []*Metric{} + _spec = mq.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Metric).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Metric{config: mq.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, mq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (mq *MetricQuery) sqlCount(ctx context.Context) (int, error) { + _spec := mq.querySpec() + _spec.Node.Columns = mq.ctx.Fields + if len(mq.ctx.Fields) > 0 { + _spec.Unique = mq.ctx.Unique != nil && *mq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, mq.driver, _spec) +} + +func (mq *MetricQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(metric.Table, metric.Columns, sqlgraph.NewFieldSpec(metric.FieldID, field.TypeInt)) + _spec.From = mq.sql + if unique := mq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if mq.path != nil { + _spec.Unique = true + } + if fields := mq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = 
append(_spec.Node.Columns, metric.FieldID) + for i := range fields { + if fields[i] != metric.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := mq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := mq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := mq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := mq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (mq *MetricQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(mq.driver.Dialect()) + t1 := builder.Table(metric.Table) + columns := mq.ctx.Fields + if len(columns) == 0 { + columns = metric.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if mq.sql != nil { + selector = mq.sql + selector.Select(selector.Columns(columns...)...) + } + if mq.ctx.Unique != nil && *mq.ctx.Unique { + selector.Distinct() + } + for _, p := range mq.predicates { + p(selector) + } + for _, p := range mq.order { + p(selector) + } + if offset := mq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := mq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// MetricGroupBy is the group-by builder for Metric entities. +type MetricGroupBy struct { + selector + build *MetricQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (mgb *MetricGroupBy) Aggregate(fns ...AggregateFunc) *MetricGroupBy { + mgb.fns = append(mgb.fns, fns...) + return mgb +} + +// Scan applies the selector query and scans the result into the given value. 
+func (mgb *MetricGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, mgb.build.ctx, "GroupBy") + if err := mgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*MetricQuery, *MetricGroupBy](ctx, mgb.build, mgb, mgb.build.inters, v) +} + +func (mgb *MetricGroupBy) sqlScan(ctx context.Context, root *MetricQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(mgb.fns)) + for _, fn := range mgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*mgb.flds)+len(mgb.fns)) + for _, f := range *mgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*mgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := mgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// MetricSelect is the builder for selecting fields of Metric entities. +type MetricSelect struct { + *MetricQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ms *MetricSelect) Aggregate(fns ...AggregateFunc) *MetricSelect { + ms.fns = append(ms.fns, fns...) + return ms +} + +// Scan applies the selector query and scans the result into the given value. 
+func (ms *MetricSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ms.ctx, "Select") + if err := ms.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*MetricQuery, *MetricSelect](ctx, ms.MetricQuery, ms, ms.inters, v) +} + +func (ms *MetricSelect) sqlScan(ctx context.Context, root *MetricQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ms.fns)) + for _, fn := range ms.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ms.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ms.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/pkg/database/ent/metric_update.go b/pkg/database/ent/metric_update.go new file mode 100644 index 00000000000..4da33dd6ce9 --- /dev/null +++ b/pkg/database/ent/metric_update.go @@ -0,0 +1,228 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// MetricUpdate is the builder for updating Metric entities. +type MetricUpdate struct { + config + hooks []Hook + mutation *MetricMutation +} + +// Where appends a list predicates to the MetricUpdate builder. +func (mu *MetricUpdate) Where(ps ...predicate.Metric) *MetricUpdate { + mu.mutation.Where(ps...) + return mu +} + +// SetPushedAt sets the "pushed_at" field. 
+func (mu *MetricUpdate) SetPushedAt(t time.Time) *MetricUpdate { + mu.mutation.SetPushedAt(t) + return mu +} + +// SetNillablePushedAt sets the "pushed_at" field if the given value is not nil. +func (mu *MetricUpdate) SetNillablePushedAt(t *time.Time) *MetricUpdate { + if t != nil { + mu.SetPushedAt(*t) + } + return mu +} + +// ClearPushedAt clears the value of the "pushed_at" field. +func (mu *MetricUpdate) ClearPushedAt() *MetricUpdate { + mu.mutation.ClearPushedAt() + return mu +} + +// Mutation returns the MetricMutation object of the builder. +func (mu *MetricUpdate) Mutation() *MetricMutation { + return mu.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (mu *MetricUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, mu.sqlSave, mu.mutation, mu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (mu *MetricUpdate) SaveX(ctx context.Context) int { + affected, err := mu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (mu *MetricUpdate) Exec(ctx context.Context) error { + _, err := mu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (mu *MetricUpdate) ExecX(ctx context.Context) { + if err := mu.Exec(ctx); err != nil { + panic(err) + } +} + +func (mu *MetricUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := sqlgraph.NewUpdateSpec(metric.Table, metric.Columns, sqlgraph.NewFieldSpec(metric.FieldID, field.TypeInt)) + if ps := mu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := mu.mutation.PushedAt(); ok { + _spec.SetField(metric.FieldPushedAt, field.TypeTime, value) + } + if mu.mutation.PushedAtCleared() { + _spec.ClearField(metric.FieldPushedAt, field.TypeTime) + } + if n, err = sqlgraph.UpdateNodes(ctx, mu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{metric.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + mu.mutation.done = true + return n, nil +} + +// MetricUpdateOne is the builder for updating a single Metric entity. +type MetricUpdateOne struct { + config + fields []string + hooks []Hook + mutation *MetricMutation +} + +// SetPushedAt sets the "pushed_at" field. +func (muo *MetricUpdateOne) SetPushedAt(t time.Time) *MetricUpdateOne { + muo.mutation.SetPushedAt(t) + return muo +} + +// SetNillablePushedAt sets the "pushed_at" field if the given value is not nil. +func (muo *MetricUpdateOne) SetNillablePushedAt(t *time.Time) *MetricUpdateOne { + if t != nil { + muo.SetPushedAt(*t) + } + return muo +} + +// ClearPushedAt clears the value of the "pushed_at" field. +func (muo *MetricUpdateOne) ClearPushedAt() *MetricUpdateOne { + muo.mutation.ClearPushedAt() + return muo +} + +// Mutation returns the MetricMutation object of the builder. +func (muo *MetricUpdateOne) Mutation() *MetricMutation { + return muo.mutation +} + +// Where appends a list predicates to the MetricUpdate builder. 
+func (muo *MetricUpdateOne) Where(ps ...predicate.Metric) *MetricUpdateOne { + muo.mutation.Where(ps...) + return muo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (muo *MetricUpdateOne) Select(field string, fields ...string) *MetricUpdateOne { + muo.fields = append([]string{field}, fields...) + return muo +} + +// Save executes the query and returns the updated Metric entity. +func (muo *MetricUpdateOne) Save(ctx context.Context) (*Metric, error) { + return withHooks(ctx, muo.sqlSave, muo.mutation, muo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (muo *MetricUpdateOne) SaveX(ctx context.Context) *Metric { + node, err := muo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (muo *MetricUpdateOne) Exec(ctx context.Context) error { + _, err := muo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (muo *MetricUpdateOne) ExecX(ctx context.Context) { + if err := muo.Exec(ctx); err != nil { + panic(err) + } +} + +func (muo *MetricUpdateOne) sqlSave(ctx context.Context) (_node *Metric, err error) { + _spec := sqlgraph.NewUpdateSpec(metric.Table, metric.Columns, sqlgraph.NewFieldSpec(metric.FieldID, field.TypeInt)) + id, ok := muo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Metric.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := muo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, metric.FieldID) + for _, f := range fields { + if !metric.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != metric.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := muo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := muo.mutation.PushedAt(); ok { + _spec.SetField(metric.FieldPushedAt, field.TypeTime, value) + } + if muo.mutation.PushedAtCleared() { + _spec.ClearField(metric.FieldPushedAt, field.TypeTime) + } + _node = &Metric{config: muo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, muo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{metric.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + muo.mutation.done = true + return _node, nil +} diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index c3ffed42239..208d7f657ac 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -11,8 +11,8 @@ var ( // AlertsColumns holds the columns for the 
"alerts" table. AlertsColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "created_at", Type: field.TypeTime, Nullable: true}, - {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, {Name: "scenario", Type: field.TypeString}, {Name: "bucket_id", Type: field.TypeString, Nullable: true, Default: ""}, {Name: "message", Type: field.TypeString, Nullable: true, Default: ""}, @@ -60,8 +60,8 @@ var ( // BouncersColumns holds the columns for the "bouncers" table. BouncersColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "created_at", Type: field.TypeTime, Nullable: true}, - {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, {Name: "name", Type: field.TypeString, Unique: true}, {Name: "api_key", Type: field.TypeString}, {Name: "revoked", Type: field.TypeBool}, @@ -71,6 +71,9 @@ var ( {Name: "until", Type: field.TypeTime, Nullable: true}, {Name: "last_pull", Type: field.TypeTime}, {Name: "auth_type", Type: field.TypeString, Default: "api-key"}, + {Name: "osname", Type: field.TypeString, Nullable: true}, + {Name: "osversion", Type: field.TypeString, Nullable: true}, + {Name: "featureflags", Type: field.TypeString, Nullable: true}, } // BouncersTable holds the schema information for the "bouncers" table. BouncersTable = &schema.Table{ @@ -81,8 +84,8 @@ var ( // ConfigItemsColumns holds the columns for the "config_items" table. 
ConfigItemsColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "created_at", Type: field.TypeTime, Nullable: true}, - {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, {Name: "name", Type: field.TypeString, Unique: true}, {Name: "value", Type: field.TypeString}, } @@ -95,8 +98,8 @@ var ( // DecisionsColumns holds the columns for the "decisions" table. DecisionsColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "created_at", Type: field.TypeTime, Nullable: true}, - {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, {Name: "until", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"mysql": "datetime"}}, {Name: "scenario", Type: field.TypeString}, {Name: "type", Type: field.TypeString}, @@ -151,8 +154,8 @@ var ( // EventsColumns holds the columns for the "events" table. EventsColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "created_at", Type: field.TypeTime, Nullable: true}, - {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, {Name: "time", Type: field.TypeTime}, {Name: "serialized", Type: field.TypeString, Size: 8191}, {Name: "alert_events", Type: field.TypeInt, Nullable: true}, @@ -193,8 +196,8 @@ var ( // MachinesColumns holds the columns for the "machines" table. 
MachinesColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "created_at", Type: field.TypeTime, Nullable: true}, - {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, {Name: "last_push", Type: field.TypeTime, Nullable: true}, {Name: "last_heartbeat", Type: field.TypeTime, Nullable: true}, {Name: "machine_id", Type: field.TypeString, Unique: true}, @@ -205,6 +208,10 @@ var ( {Name: "is_validated", Type: field.TypeBool, Default: false}, {Name: "status", Type: field.TypeString, Nullable: true}, {Name: "auth_type", Type: field.TypeString, Default: "password"}, + {Name: "osname", Type: field.TypeString, Nullable: true}, + {Name: "osversion", Type: field.TypeString, Nullable: true}, + {Name: "featureflags", Type: field.TypeString, Nullable: true}, + {Name: "hubstate", Type: field.TypeJSON, Nullable: true}, } // MachinesTable holds the schema information for the "machines" table. MachinesTable = &schema.Table{ @@ -215,8 +222,8 @@ var ( // MetaColumns holds the columns for the "meta" table. MetaColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "created_at", Type: field.TypeTime, Nullable: true}, - {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, {Name: "key", Type: field.TypeString}, {Name: "value", Type: field.TypeString, Size: 4095}, {Name: "alert_metas", Type: field.TypeInt, Nullable: true}, @@ -242,6 +249,28 @@ var ( }, }, } + // MetricsColumns holds the columns for the "metrics" table. 
+ MetricsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "generated_type", Type: field.TypeEnum, Enums: []string{"LP", "RC"}}, + {Name: "generated_by", Type: field.TypeString}, + {Name: "collected_at", Type: field.TypeTime}, + {Name: "pushed_at", Type: field.TypeTime, Nullable: true}, + {Name: "payload", Type: field.TypeString}, + } + // MetricsTable holds the schema information for the "metrics" table. + MetricsTable = &schema.Table{ + Name: "metrics", + Columns: MetricsColumns, + PrimaryKey: []*schema.Column{MetricsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "metric_generated_type_generated_by_collected_at", + Unique: true, + Columns: []*schema.Column{MetricsColumns[1], MetricsColumns[2], MetricsColumns[3]}, + }, + }, + } // Tables holds all the tables in the schema. Tables = []*schema.Table{ AlertsTable, @@ -252,6 +281,7 @@ var ( LocksTable, MachinesTable, MetaTable, + MetricsTable, } ) diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index 365824de739..9c34162d8a1 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -19,7 +19,9 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/crowdsecurity/crowdsec/pkg/models" ) const ( @@ -39,6 +41,7 @@ const ( TypeLock = "Lock" TypeMachine = "Machine" TypeMeta = "Meta" + TypeMetric = "Metric" ) // AlertMutation represents an operation that mutates the Alert nodes in the graph. @@ -206,7 +209,7 @@ func (m *AlertMutation) CreatedAt() (r time.Time, exists bool) { // OldCreatedAt returns the old "created_at" field's value of the Alert entity. // If the Alert object wasn't provided to the builder, the object is fetched from the database. 
// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *AlertMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *AlertMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -220,22 +223,9 @@ func (m *AlertMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err err return oldValue.CreatedAt, nil } -// ClearCreatedAt clears the value of the "created_at" field. -func (m *AlertMutation) ClearCreatedAt() { - m.created_at = nil - m.clearedFields[alert.FieldCreatedAt] = struct{}{} -} - -// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. -func (m *AlertMutation) CreatedAtCleared() bool { - _, ok := m.clearedFields[alert.FieldCreatedAt] - return ok -} - // ResetCreatedAt resets all changes to the "created_at" field. func (m *AlertMutation) ResetCreatedAt() { m.created_at = nil - delete(m.clearedFields, alert.FieldCreatedAt) } // SetUpdatedAt sets the "updated_at" field. @@ -255,7 +245,7 @@ func (m *AlertMutation) UpdatedAt() (r time.Time, exists bool) { // OldUpdatedAt returns the old "updated_at" field's value of the Alert entity. // If the Alert object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *AlertMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *AlertMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") } @@ -269,22 +259,9 @@ func (m *AlertMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err err return oldValue.UpdatedAt, nil } -// ClearUpdatedAt clears the value of the "updated_at" field. 
-func (m *AlertMutation) ClearUpdatedAt() { - m.updated_at = nil - m.clearedFields[alert.FieldUpdatedAt] = struct{}{} -} - -// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. -func (m *AlertMutation) UpdatedAtCleared() bool { - _, ok := m.clearedFields[alert.FieldUpdatedAt] - return ok -} - // ResetUpdatedAt resets all changes to the "updated_at" field. func (m *AlertMutation) ResetUpdatedAt() { m.updated_at = nil - delete(m.clearedFields, alert.FieldUpdatedAt) } // SetScenario sets the "scenario" field. @@ -2039,12 +2016,6 @@ func (m *AlertMutation) AddField(name string, value ent.Value) error { // mutation. func (m *AlertMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(alert.FieldCreatedAt) { - fields = append(fields, alert.FieldCreatedAt) - } - if m.FieldCleared(alert.FieldUpdatedAt) { - fields = append(fields, alert.FieldUpdatedAt) - } if m.FieldCleared(alert.FieldBucketId) { fields = append(fields, alert.FieldBucketId) } @@ -2116,12 +2087,6 @@ func (m *AlertMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. func (m *AlertMutation) ClearField(name string) error { switch name { - case alert.FieldCreatedAt: - m.ClearCreatedAt() - return nil - case alert.FieldUpdatedAt: - m.ClearUpdatedAt() - return nil case alert.FieldBucketId: m.ClearBucketId() return nil @@ -2431,6 +2396,9 @@ type BouncerMutation struct { until *time.Time last_pull *time.Time auth_type *string + osname *string + osversion *string + featureflags *string clearedFields map[string]struct{} done bool oldValue func(context.Context) (*Bouncer, error) @@ -2552,7 +2520,7 @@ func (m *BouncerMutation) CreatedAt() (r time.Time, exists bool) { // OldCreatedAt returns the old "created_at" field's value of the Bouncer entity. // If the Bouncer object wasn't provided to the builder, the object is fetched from the database. 
// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BouncerMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *BouncerMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -2566,22 +2534,9 @@ func (m *BouncerMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err e return oldValue.CreatedAt, nil } -// ClearCreatedAt clears the value of the "created_at" field. -func (m *BouncerMutation) ClearCreatedAt() { - m.created_at = nil - m.clearedFields[bouncer.FieldCreatedAt] = struct{}{} -} - -// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. -func (m *BouncerMutation) CreatedAtCleared() bool { - _, ok := m.clearedFields[bouncer.FieldCreatedAt] - return ok -} - // ResetCreatedAt resets all changes to the "created_at" field. func (m *BouncerMutation) ResetCreatedAt() { m.created_at = nil - delete(m.clearedFields, bouncer.FieldCreatedAt) } // SetUpdatedAt sets the "updated_at" field. @@ -2601,7 +2556,7 @@ func (m *BouncerMutation) UpdatedAt() (r time.Time, exists bool) { // OldUpdatedAt returns the old "updated_at" field's value of the Bouncer entity. // If the Bouncer object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BouncerMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *BouncerMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") } @@ -2615,22 +2570,9 @@ func (m *BouncerMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err e return oldValue.UpdatedAt, nil } -// ClearUpdatedAt clears the value of the "updated_at" field. 
-func (m *BouncerMutation) ClearUpdatedAt() { - m.updated_at = nil - m.clearedFields[bouncer.FieldUpdatedAt] = struct{}{} -} - -// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. -func (m *BouncerMutation) UpdatedAtCleared() bool { - _, ok := m.clearedFields[bouncer.FieldUpdatedAt] - return ok -} - // ResetUpdatedAt resets all changes to the "updated_at" field. func (m *BouncerMutation) ResetUpdatedAt() { m.updated_at = nil - delete(m.clearedFields, bouncer.FieldUpdatedAt) } // SetName sets the "name" field. @@ -3009,6 +2951,153 @@ func (m *BouncerMutation) ResetAuthType() { m.auth_type = nil } +// SetOsname sets the "osname" field. +func (m *BouncerMutation) SetOsname(s string) { + m.osname = &s +} + +// Osname returns the value of the "osname" field in the mutation. +func (m *BouncerMutation) Osname() (r string, exists bool) { + v := m.osname + if v == nil { + return + } + return *v, true +} + +// OldOsname returns the old "osname" field's value of the Bouncer entity. +// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BouncerMutation) OldOsname(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOsname is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOsname requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOsname: %w", err) + } + return oldValue.Osname, nil +} + +// ClearOsname clears the value of the "osname" field. +func (m *BouncerMutation) ClearOsname() { + m.osname = nil + m.clearedFields[bouncer.FieldOsname] = struct{}{} +} + +// OsnameCleared returns if the "osname" field was cleared in this mutation. 
+func (m *BouncerMutation) OsnameCleared() bool { + _, ok := m.clearedFields[bouncer.FieldOsname] + return ok +} + +// ResetOsname resets all changes to the "osname" field. +func (m *BouncerMutation) ResetOsname() { + m.osname = nil + delete(m.clearedFields, bouncer.FieldOsname) +} + +// SetOsversion sets the "osversion" field. +func (m *BouncerMutation) SetOsversion(s string) { + m.osversion = &s +} + +// Osversion returns the value of the "osversion" field in the mutation. +func (m *BouncerMutation) Osversion() (r string, exists bool) { + v := m.osversion + if v == nil { + return + } + return *v, true +} + +// OldOsversion returns the old "osversion" field's value of the Bouncer entity. +// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BouncerMutation) OldOsversion(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOsversion is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOsversion requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOsversion: %w", err) + } + return oldValue.Osversion, nil +} + +// ClearOsversion clears the value of the "osversion" field. +func (m *BouncerMutation) ClearOsversion() { + m.osversion = nil + m.clearedFields[bouncer.FieldOsversion] = struct{}{} +} + +// OsversionCleared returns if the "osversion" field was cleared in this mutation. +func (m *BouncerMutation) OsversionCleared() bool { + _, ok := m.clearedFields[bouncer.FieldOsversion] + return ok +} + +// ResetOsversion resets all changes to the "osversion" field. 
+func (m *BouncerMutation) ResetOsversion() { + m.osversion = nil + delete(m.clearedFields, bouncer.FieldOsversion) +} + +// SetFeatureflags sets the "featureflags" field. +func (m *BouncerMutation) SetFeatureflags(s string) { + m.featureflags = &s +} + +// Featureflags returns the value of the "featureflags" field in the mutation. +func (m *BouncerMutation) Featureflags() (r string, exists bool) { + v := m.featureflags + if v == nil { + return + } + return *v, true +} + +// OldFeatureflags returns the old "featureflags" field's value of the Bouncer entity. +// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BouncerMutation) OldFeatureflags(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFeatureflags is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFeatureflags requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFeatureflags: %w", err) + } + return oldValue.Featureflags, nil +} + +// ClearFeatureflags clears the value of the "featureflags" field. +func (m *BouncerMutation) ClearFeatureflags() { + m.featureflags = nil + m.clearedFields[bouncer.FieldFeatureflags] = struct{}{} +} + +// FeatureflagsCleared returns if the "featureflags" field was cleared in this mutation. +func (m *BouncerMutation) FeatureflagsCleared() bool { + _, ok := m.clearedFields[bouncer.FieldFeatureflags] + return ok +} + +// ResetFeatureflags resets all changes to the "featureflags" field. +func (m *BouncerMutation) ResetFeatureflags() { + m.featureflags = nil + delete(m.clearedFields, bouncer.FieldFeatureflags) +} + // Where appends a list predicates to the BouncerMutation builder. 
func (m *BouncerMutation) Where(ps ...predicate.Bouncer) { m.predicates = append(m.predicates, ps...) @@ -3043,7 +3132,7 @@ func (m *BouncerMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). func (m *BouncerMutation) Fields() []string { - fields := make([]string, 0, 11) + fields := make([]string, 0, 14) if m.created_at != nil { fields = append(fields, bouncer.FieldCreatedAt) } @@ -3077,6 +3166,15 @@ func (m *BouncerMutation) Fields() []string { if m.auth_type != nil { fields = append(fields, bouncer.FieldAuthType) } + if m.osname != nil { + fields = append(fields, bouncer.FieldOsname) + } + if m.osversion != nil { + fields = append(fields, bouncer.FieldOsversion) + } + if m.featureflags != nil { + fields = append(fields, bouncer.FieldFeatureflags) + } return fields } @@ -3107,6 +3205,12 @@ func (m *BouncerMutation) Field(name string) (ent.Value, bool) { return m.LastPull() case bouncer.FieldAuthType: return m.AuthType() + case bouncer.FieldOsname: + return m.Osname() + case bouncer.FieldOsversion: + return m.Osversion() + case bouncer.FieldFeatureflags: + return m.Featureflags() } return nil, false } @@ -3138,6 +3242,12 @@ func (m *BouncerMutation) OldField(ctx context.Context, name string) (ent.Value, return m.OldLastPull(ctx) case bouncer.FieldAuthType: return m.OldAuthType(ctx) + case bouncer.FieldOsname: + return m.OldOsname(ctx) + case bouncer.FieldOsversion: + return m.OldOsversion(ctx) + case bouncer.FieldFeatureflags: + return m.OldFeatureflags(ctx) } return nil, fmt.Errorf("unknown Bouncer field %s", name) } @@ -3224,6 +3334,27 @@ func (m *BouncerMutation) SetField(name string, value ent.Value) error { } m.SetAuthType(v) return nil + case bouncer.FieldOsname: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOsname(v) + return nil + case bouncer.FieldOsversion: + v, ok := value.(string) + if !ok { + return 
fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOsversion(v) + return nil + case bouncer.FieldFeatureflags: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFeatureflags(v) + return nil } return fmt.Errorf("unknown Bouncer field %s", name) } @@ -3254,12 +3385,6 @@ func (m *BouncerMutation) AddField(name string, value ent.Value) error { // mutation. func (m *BouncerMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(bouncer.FieldCreatedAt) { - fields = append(fields, bouncer.FieldCreatedAt) - } - if m.FieldCleared(bouncer.FieldUpdatedAt) { - fields = append(fields, bouncer.FieldUpdatedAt) - } if m.FieldCleared(bouncer.FieldIPAddress) { fields = append(fields, bouncer.FieldIPAddress) } @@ -3272,6 +3397,15 @@ func (m *BouncerMutation) ClearedFields() []string { if m.FieldCleared(bouncer.FieldUntil) { fields = append(fields, bouncer.FieldUntil) } + if m.FieldCleared(bouncer.FieldOsname) { + fields = append(fields, bouncer.FieldOsname) + } + if m.FieldCleared(bouncer.FieldOsversion) { + fields = append(fields, bouncer.FieldOsversion) + } + if m.FieldCleared(bouncer.FieldFeatureflags) { + fields = append(fields, bouncer.FieldFeatureflags) + } return fields } @@ -3286,12 +3420,6 @@ func (m *BouncerMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. 
func (m *BouncerMutation) ClearField(name string) error { switch name { - case bouncer.FieldCreatedAt: - m.ClearCreatedAt() - return nil - case bouncer.FieldUpdatedAt: - m.ClearUpdatedAt() - return nil case bouncer.FieldIPAddress: m.ClearIPAddress() return nil @@ -3304,6 +3432,15 @@ func (m *BouncerMutation) ClearField(name string) error { case bouncer.FieldUntil: m.ClearUntil() return nil + case bouncer.FieldOsname: + m.ClearOsname() + return nil + case bouncer.FieldOsversion: + m.ClearOsversion() + return nil + case bouncer.FieldFeatureflags: + m.ClearFeatureflags() + return nil } return fmt.Errorf("unknown Bouncer nullable field %s", name) } @@ -3345,6 +3482,15 @@ func (m *BouncerMutation) ResetField(name string) error { case bouncer.FieldAuthType: m.ResetAuthType() return nil + case bouncer.FieldOsname: + m.ResetOsname() + return nil + case bouncer.FieldOsversion: + m.ResetOsversion() + return nil + case bouncer.FieldFeatureflags: + m.ResetFeatureflags() + return nil } return fmt.Errorf("unknown Bouncer field %s", name) } @@ -3528,7 +3674,7 @@ func (m *ConfigItemMutation) CreatedAt() (r time.Time, exists bool) { // OldCreatedAt returns the old "created_at" field's value of the ConfigItem entity. // If the ConfigItem object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *ConfigItemMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *ConfigItemMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -3542,22 +3688,9 @@ func (m *ConfigItemMutation) OldCreatedAt(ctx context.Context) (v *time.Time, er return oldValue.CreatedAt, nil } -// ClearCreatedAt clears the value of the "created_at" field. 
-func (m *ConfigItemMutation) ClearCreatedAt() { - m.created_at = nil - m.clearedFields[configitem.FieldCreatedAt] = struct{}{} -} - -// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. -func (m *ConfigItemMutation) CreatedAtCleared() bool { - _, ok := m.clearedFields[configitem.FieldCreatedAt] - return ok -} - // ResetCreatedAt resets all changes to the "created_at" field. func (m *ConfigItemMutation) ResetCreatedAt() { m.created_at = nil - delete(m.clearedFields, configitem.FieldCreatedAt) } // SetUpdatedAt sets the "updated_at" field. @@ -3577,7 +3710,7 @@ func (m *ConfigItemMutation) UpdatedAt() (r time.Time, exists bool) { // OldUpdatedAt returns the old "updated_at" field's value of the ConfigItem entity. // If the ConfigItem object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *ConfigItemMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *ConfigItemMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") } @@ -3591,22 +3724,9 @@ func (m *ConfigItemMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, er return oldValue.UpdatedAt, nil } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (m *ConfigItemMutation) ClearUpdatedAt() { - m.updated_at = nil - m.clearedFields[configitem.FieldUpdatedAt] = struct{}{} -} - -// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. -func (m *ConfigItemMutation) UpdatedAtCleared() bool { - _, ok := m.clearedFields[configitem.FieldUpdatedAt] - return ok -} - // ResetUpdatedAt resets all changes to the "updated_at" field. 
func (m *ConfigItemMutation) ResetUpdatedAt() { m.updated_at = nil - delete(m.clearedFields, configitem.FieldUpdatedAt) } // SetName sets the "name" field. @@ -3827,14 +3947,7 @@ func (m *ConfigItemMutation) AddField(name string, value ent.Value) error { // ClearedFields returns all nullable fields that were cleared during this // mutation. func (m *ConfigItemMutation) ClearedFields() []string { - var fields []string - if m.FieldCleared(configitem.FieldCreatedAt) { - fields = append(fields, configitem.FieldCreatedAt) - } - if m.FieldCleared(configitem.FieldUpdatedAt) { - fields = append(fields, configitem.FieldUpdatedAt) - } - return fields + return nil } // FieldCleared returns a boolean indicating if a field with the given name was @@ -3847,14 +3960,6 @@ func (m *ConfigItemMutation) FieldCleared(name string) bool { // ClearField clears the value of the field with the given name. It returns an // error if the field is not defined in the schema. func (m *ConfigItemMutation) ClearField(name string) error { - switch name { - case configitem.FieldCreatedAt: - m.ClearCreatedAt() - return nil - case configitem.FieldUpdatedAt: - m.ClearUpdatedAt() - return nil - } return fmt.Errorf("unknown ConfigItem nullable field %s", name) } @@ -4075,7 +4180,7 @@ func (m *DecisionMutation) CreatedAt() (r time.Time, exists bool) { // OldCreatedAt returns the old "created_at" field's value of the Decision entity. // If the Decision object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *DecisionMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *DecisionMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -4089,22 +4194,9 @@ func (m *DecisionMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err return oldValue.CreatedAt, nil } -// ClearCreatedAt clears the value of the "created_at" field. -func (m *DecisionMutation) ClearCreatedAt() { - m.created_at = nil - m.clearedFields[decision.FieldCreatedAt] = struct{}{} -} - -// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. -func (m *DecisionMutation) CreatedAtCleared() bool { - _, ok := m.clearedFields[decision.FieldCreatedAt] - return ok -} - // ResetCreatedAt resets all changes to the "created_at" field. func (m *DecisionMutation) ResetCreatedAt() { m.created_at = nil - delete(m.clearedFields, decision.FieldCreatedAt) } // SetUpdatedAt sets the "updated_at" field. @@ -4124,7 +4216,7 @@ func (m *DecisionMutation) UpdatedAt() (r time.Time, exists bool) { // OldUpdatedAt returns the old "updated_at" field's value of the Decision entity. // If the Decision object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *DecisionMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *DecisionMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") } @@ -4138,22 +4230,9 @@ func (m *DecisionMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err return oldValue.UpdatedAt, nil } -// ClearUpdatedAt clears the value of the "updated_at" field. 
-func (m *DecisionMutation) ClearUpdatedAt() { - m.updated_at = nil - m.clearedFields[decision.FieldUpdatedAt] = struct{}{} -} - -// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. -func (m *DecisionMutation) UpdatedAtCleared() bool { - _, ok := m.clearedFields[decision.FieldUpdatedAt] - return ok -} - // ResetUpdatedAt resets all changes to the "updated_at" field. func (m *DecisionMutation) ResetUpdatedAt() { m.updated_at = nil - delete(m.clearedFields, decision.FieldUpdatedAt) } // SetUntil sets the "until" field. @@ -5287,12 +5366,6 @@ func (m *DecisionMutation) AddField(name string, value ent.Value) error { // mutation. func (m *DecisionMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(decision.FieldCreatedAt) { - fields = append(fields, decision.FieldCreatedAt) - } - if m.FieldCleared(decision.FieldUpdatedAt) { - fields = append(fields, decision.FieldUpdatedAt) - } if m.FieldCleared(decision.FieldUntil) { fields = append(fields, decision.FieldUntil) } @@ -5331,12 +5404,6 @@ func (m *DecisionMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. func (m *DecisionMutation) ClearField(name string) error { switch name { - case decision.FieldCreatedAt: - m.ClearCreatedAt() - return nil - case decision.FieldUpdatedAt: - m.ClearUpdatedAt() - return nil case decision.FieldUntil: m.ClearUntil() return nil @@ -5628,7 +5695,7 @@ func (m *EventMutation) CreatedAt() (r time.Time, exists bool) { // OldCreatedAt returns the old "created_at" field's value of the Event entity. // If the Event object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *EventMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *EventMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -5642,22 +5709,9 @@ func (m *EventMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err err return oldValue.CreatedAt, nil } -// ClearCreatedAt clears the value of the "created_at" field. -func (m *EventMutation) ClearCreatedAt() { - m.created_at = nil - m.clearedFields[event.FieldCreatedAt] = struct{}{} -} - -// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. -func (m *EventMutation) CreatedAtCleared() bool { - _, ok := m.clearedFields[event.FieldCreatedAt] - return ok -} - // ResetCreatedAt resets all changes to the "created_at" field. func (m *EventMutation) ResetCreatedAt() { m.created_at = nil - delete(m.clearedFields, event.FieldCreatedAt) } // SetUpdatedAt sets the "updated_at" field. @@ -5677,7 +5731,7 @@ func (m *EventMutation) UpdatedAt() (r time.Time, exists bool) { // OldUpdatedAt returns the old "updated_at" field's value of the Event entity. // If the Event object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *EventMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *EventMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") } @@ -5691,22 +5745,9 @@ func (m *EventMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err err return oldValue.UpdatedAt, nil } -// ClearUpdatedAt clears the value of the "updated_at" field. 
-func (m *EventMutation) ClearUpdatedAt() { - m.updated_at = nil - m.clearedFields[event.FieldUpdatedAt] = struct{}{} -} - -// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. -func (m *EventMutation) UpdatedAtCleared() bool { - _, ok := m.clearedFields[event.FieldUpdatedAt] - return ok -} - // ResetUpdatedAt resets all changes to the "updated_at" field. func (m *EventMutation) ResetUpdatedAt() { m.updated_at = nil - delete(m.clearedFields, event.FieldUpdatedAt) } // SetTime sets the "time" field. @@ -6034,12 +6075,6 @@ func (m *EventMutation) AddField(name string, value ent.Value) error { // mutation. func (m *EventMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(event.FieldCreatedAt) { - fields = append(fields, event.FieldCreatedAt) - } - if m.FieldCleared(event.FieldUpdatedAt) { - fields = append(fields, event.FieldUpdatedAt) - } if m.FieldCleared(event.FieldAlertEvents) { fields = append(fields, event.FieldAlertEvents) } @@ -6057,12 +6092,6 @@ func (m *EventMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. func (m *EventMutation) ClearField(name string) error { switch name { - case event.FieldCreatedAt: - m.ClearCreatedAt() - return nil - case event.FieldUpdatedAt: - m.ClearUpdatedAt() - return nil case event.FieldAlertEvents: m.ClearAlertEvents() return nil @@ -6565,6 +6594,10 @@ type MachineMutation struct { isValidated *bool status *string auth_type *string + osname *string + osversion *string + featureflags *string + hubstate **models.HubItems clearedFields map[string]struct{} alerts map[int]struct{} removedalerts map[int]struct{} @@ -6689,7 +6722,7 @@ func (m *MachineMutation) CreatedAt() (r time.Time, exists bool) { // OldCreatedAt returns the old "created_at" field's value of the Machine entity. // If the Machine object wasn't provided to the builder, the object is fetched from the database. 
// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *MachineMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *MachineMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -6703,22 +6736,9 @@ func (m *MachineMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err e return oldValue.CreatedAt, nil } -// ClearCreatedAt clears the value of the "created_at" field. -func (m *MachineMutation) ClearCreatedAt() { - m.created_at = nil - m.clearedFields[machine.FieldCreatedAt] = struct{}{} -} - -// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. -func (m *MachineMutation) CreatedAtCleared() bool { - _, ok := m.clearedFields[machine.FieldCreatedAt] - return ok -} - // ResetCreatedAt resets all changes to the "created_at" field. func (m *MachineMutation) ResetCreatedAt() { m.created_at = nil - delete(m.clearedFields, machine.FieldCreatedAt) } // SetUpdatedAt sets the "updated_at" field. @@ -6738,7 +6758,7 @@ func (m *MachineMutation) UpdatedAt() (r time.Time, exists bool) { // OldUpdatedAt returns the old "updated_at" field's value of the Machine entity. // If the Machine object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *MachineMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *MachineMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") } @@ -6752,22 +6772,9 @@ func (m *MachineMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err e return oldValue.UpdatedAt, nil } -// ClearUpdatedAt clears the value of the "updated_at" field. 
-func (m *MachineMutation) ClearUpdatedAt() { - m.updated_at = nil - m.clearedFields[machine.FieldUpdatedAt] = struct{}{} -} - -// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. -func (m *MachineMutation) UpdatedAtCleared() bool { - _, ok := m.clearedFields[machine.FieldUpdatedAt] - return ok -} - // ResetUpdatedAt resets all changes to the "updated_at" field. func (m *MachineMutation) ResetUpdatedAt() { m.updated_at = nil - delete(m.clearedFields, machine.FieldUpdatedAt) } // SetLastPush sets the "last_push" field. @@ -7195,34 +7202,230 @@ func (m *MachineMutation) ResetAuthType() { m.auth_type = nil } -// AddAlertIDs adds the "alerts" edge to the Alert entity by ids. -func (m *MachineMutation) AddAlertIDs(ids ...int) { - if m.alerts == nil { - m.alerts = make(map[int]struct{}) - } - for i := range ids { - m.alerts[ids[i]] = struct{}{} - } -} - -// ClearAlerts clears the "alerts" edge to the Alert entity. -func (m *MachineMutation) ClearAlerts() { - m.clearedalerts = true +// SetOsname sets the "osname" field. +func (m *MachineMutation) SetOsname(s string) { + m.osname = &s } -// AlertsCleared reports if the "alerts" edge to the Alert entity was cleared. -func (m *MachineMutation) AlertsCleared() bool { - return m.clearedalerts +// Osname returns the value of the "osname" field in the mutation. +func (m *MachineMutation) Osname() (r string, exists bool) { + v := m.osname + if v == nil { + return + } + return *v, true } -// RemoveAlertIDs removes the "alerts" edge to the Alert entity by IDs. -func (m *MachineMutation) RemoveAlertIDs(ids ...int) { - if m.removedalerts == nil { - m.removedalerts = make(map[int]struct{}) +// OldOsname returns the old "osname" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *MachineMutation) OldOsname(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOsname is only allowed on UpdateOne operations") } - for i := range ids { - delete(m.alerts, ids[i]) - m.removedalerts[ids[i]] = struct{}{} + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOsname requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOsname: %w", err) + } + return oldValue.Osname, nil +} + +// ClearOsname clears the value of the "osname" field. +func (m *MachineMutation) ClearOsname() { + m.osname = nil + m.clearedFields[machine.FieldOsname] = struct{}{} +} + +// OsnameCleared returns if the "osname" field was cleared in this mutation. +func (m *MachineMutation) OsnameCleared() bool { + _, ok := m.clearedFields[machine.FieldOsname] + return ok +} + +// ResetOsname resets all changes to the "osname" field. +func (m *MachineMutation) ResetOsname() { + m.osname = nil + delete(m.clearedFields, machine.FieldOsname) +} + +// SetOsversion sets the "osversion" field. +func (m *MachineMutation) SetOsversion(s string) { + m.osversion = &s +} + +// Osversion returns the value of the "osversion" field in the mutation. +func (m *MachineMutation) Osversion() (r string, exists bool) { + v := m.osversion + if v == nil { + return + } + return *v, true +} + +// OldOsversion returns the old "osversion" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *MachineMutation) OldOsversion(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOsversion is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOsversion requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOsversion: %w", err) + } + return oldValue.Osversion, nil +} + +// ClearOsversion clears the value of the "osversion" field. +func (m *MachineMutation) ClearOsversion() { + m.osversion = nil + m.clearedFields[machine.FieldOsversion] = struct{}{} +} + +// OsversionCleared returns if the "osversion" field was cleared in this mutation. +func (m *MachineMutation) OsversionCleared() bool { + _, ok := m.clearedFields[machine.FieldOsversion] + return ok +} + +// ResetOsversion resets all changes to the "osversion" field. +func (m *MachineMutation) ResetOsversion() { + m.osversion = nil + delete(m.clearedFields, machine.FieldOsversion) +} + +// SetFeatureflags sets the "featureflags" field. +func (m *MachineMutation) SetFeatureflags(s string) { + m.featureflags = &s +} + +// Featureflags returns the value of the "featureflags" field in the mutation. +func (m *MachineMutation) Featureflags() (r string, exists bool) { + v := m.featureflags + if v == nil { + return + } + return *v, true +} + +// OldFeatureflags returns the old "featureflags" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *MachineMutation) OldFeatureflags(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFeatureflags is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFeatureflags requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFeatureflags: %w", err) + } + return oldValue.Featureflags, nil +} + +// ClearFeatureflags clears the value of the "featureflags" field. +func (m *MachineMutation) ClearFeatureflags() { + m.featureflags = nil + m.clearedFields[machine.FieldFeatureflags] = struct{}{} +} + +// FeatureflagsCleared returns if the "featureflags" field was cleared in this mutation. +func (m *MachineMutation) FeatureflagsCleared() bool { + _, ok := m.clearedFields[machine.FieldFeatureflags] + return ok +} + +// ResetFeatureflags resets all changes to the "featureflags" field. +func (m *MachineMutation) ResetFeatureflags() { + m.featureflags = nil + delete(m.clearedFields, machine.FieldFeatureflags) +} + +// SetHubstate sets the "hubstate" field. +func (m *MachineMutation) SetHubstate(mi *models.HubItems) { + m.hubstate = &mi +} + +// Hubstate returns the value of the "hubstate" field in the mutation. +func (m *MachineMutation) Hubstate() (r *models.HubItems, exists bool) { + v := m.hubstate + if v == nil { + return + } + return *v, true +} + +// OldHubstate returns the old "hubstate" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *MachineMutation) OldHubstate(ctx context.Context) (v *models.HubItems, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldHubstate is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldHubstate requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldHubstate: %w", err) + } + return oldValue.Hubstate, nil +} + +// ClearHubstate clears the value of the "hubstate" field. +func (m *MachineMutation) ClearHubstate() { + m.hubstate = nil + m.clearedFields[machine.FieldHubstate] = struct{}{} +} + +// HubstateCleared returns if the "hubstate" field was cleared in this mutation. +func (m *MachineMutation) HubstateCleared() bool { + _, ok := m.clearedFields[machine.FieldHubstate] + return ok +} + +// ResetHubstate resets all changes to the "hubstate" field. +func (m *MachineMutation) ResetHubstate() { + m.hubstate = nil + delete(m.clearedFields, machine.FieldHubstate) +} + +// AddAlertIDs adds the "alerts" edge to the Alert entity by ids. +func (m *MachineMutation) AddAlertIDs(ids ...int) { + if m.alerts == nil { + m.alerts = make(map[int]struct{}) + } + for i := range ids { + m.alerts[ids[i]] = struct{}{} + } +} + +// ClearAlerts clears the "alerts" edge to the Alert entity. +func (m *MachineMutation) ClearAlerts() { + m.clearedalerts = true +} + +// AlertsCleared reports if the "alerts" edge to the Alert entity was cleared. +func (m *MachineMutation) AlertsCleared() bool { + return m.clearedalerts +} + +// RemoveAlertIDs removes the "alerts" edge to the Alert entity by IDs. 
+func (m *MachineMutation) RemoveAlertIDs(ids ...int) { + if m.removedalerts == nil { + m.removedalerts = make(map[int]struct{}) + } + for i := range ids { + delete(m.alerts, ids[i]) + m.removedalerts[ids[i]] = struct{}{} } } @@ -7283,7 +7486,7 @@ func (m *MachineMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). func (m *MachineMutation) Fields() []string { - fields := make([]string, 0, 12) + fields := make([]string, 0, 16) if m.created_at != nil { fields = append(fields, machine.FieldCreatedAt) } @@ -7320,6 +7523,18 @@ func (m *MachineMutation) Fields() []string { if m.auth_type != nil { fields = append(fields, machine.FieldAuthType) } + if m.osname != nil { + fields = append(fields, machine.FieldOsname) + } + if m.osversion != nil { + fields = append(fields, machine.FieldOsversion) + } + if m.featureflags != nil { + fields = append(fields, machine.FieldFeatureflags) + } + if m.hubstate != nil { + fields = append(fields, machine.FieldHubstate) + } return fields } @@ -7352,6 +7567,14 @@ func (m *MachineMutation) Field(name string) (ent.Value, bool) { return m.Status() case machine.FieldAuthType: return m.AuthType() + case machine.FieldOsname: + return m.Osname() + case machine.FieldOsversion: + return m.Osversion() + case machine.FieldFeatureflags: + return m.Featureflags() + case machine.FieldHubstate: + return m.Hubstate() } return nil, false } @@ -7385,6 +7608,14 @@ func (m *MachineMutation) OldField(ctx context.Context, name string) (ent.Value, return m.OldStatus(ctx) case machine.FieldAuthType: return m.OldAuthType(ctx) + case machine.FieldOsname: + return m.OldOsname(ctx) + case machine.FieldOsversion: + return m.OldOsversion(ctx) + case machine.FieldFeatureflags: + return m.OldFeatureflags(ctx) + case machine.FieldHubstate: + return m.OldHubstate(ctx) } return nil, fmt.Errorf("unknown Machine field %s", name) } @@ -7478,6 +7709,34 @@ func (m *MachineMutation) SetField(name string, value 
ent.Value) error { } m.SetAuthType(v) return nil + case machine.FieldOsname: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOsname(v) + return nil + case machine.FieldOsversion: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOsversion(v) + return nil + case machine.FieldFeatureflags: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFeatureflags(v) + return nil + case machine.FieldHubstate: + v, ok := value.(*models.HubItems) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetHubstate(v) + return nil } return fmt.Errorf("unknown Machine field %s", name) } @@ -7508,12 +7767,6 @@ func (m *MachineMutation) AddField(name string, value ent.Value) error { // mutation. func (m *MachineMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(machine.FieldCreatedAt) { - fields = append(fields, machine.FieldCreatedAt) - } - if m.FieldCleared(machine.FieldUpdatedAt) { - fields = append(fields, machine.FieldUpdatedAt) - } if m.FieldCleared(machine.FieldLastPush) { fields = append(fields, machine.FieldLastPush) } @@ -7529,6 +7782,18 @@ func (m *MachineMutation) ClearedFields() []string { if m.FieldCleared(machine.FieldStatus) { fields = append(fields, machine.FieldStatus) } + if m.FieldCleared(machine.FieldOsname) { + fields = append(fields, machine.FieldOsname) + } + if m.FieldCleared(machine.FieldOsversion) { + fields = append(fields, machine.FieldOsversion) + } + if m.FieldCleared(machine.FieldFeatureflags) { + fields = append(fields, machine.FieldFeatureflags) + } + if m.FieldCleared(machine.FieldHubstate) { + fields = append(fields, machine.FieldHubstate) + } return fields } @@ -7543,12 +7808,6 @@ func (m *MachineMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. 
func (m *MachineMutation) ClearField(name string) error { switch name { - case machine.FieldCreatedAt: - m.ClearCreatedAt() - return nil - case machine.FieldUpdatedAt: - m.ClearUpdatedAt() - return nil case machine.FieldLastPush: m.ClearLastPush() return nil @@ -7564,6 +7823,18 @@ func (m *MachineMutation) ClearField(name string) error { case machine.FieldStatus: m.ClearStatus() return nil + case machine.FieldOsname: + m.ClearOsname() + return nil + case machine.FieldOsversion: + m.ClearOsversion() + return nil + case machine.FieldFeatureflags: + m.ClearFeatureflags() + return nil + case machine.FieldHubstate: + m.ClearHubstate() + return nil } return fmt.Errorf("unknown Machine nullable field %s", name) } @@ -7608,6 +7879,18 @@ func (m *MachineMutation) ResetField(name string) error { case machine.FieldAuthType: m.ResetAuthType() return nil + case machine.FieldOsname: + m.ResetOsname() + return nil + case machine.FieldOsversion: + m.ResetOsversion() + return nil + case machine.FieldFeatureflags: + m.ResetFeatureflags() + return nil + case machine.FieldHubstate: + m.ResetHubstate() + return nil } return fmt.Errorf("unknown Machine field %s", name) } @@ -7829,7 +8112,7 @@ func (m *MetaMutation) CreatedAt() (r time.Time, exists bool) { // OldCreatedAt returns the old "created_at" field's value of the Meta entity. // If the Meta object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *MetaMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *MetaMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -7843,22 +8126,9 @@ func (m *MetaMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err erro return oldValue.CreatedAt, nil } -// ClearCreatedAt clears the value of the "created_at" field. -func (m *MetaMutation) ClearCreatedAt() { - m.created_at = nil - m.clearedFields[meta.FieldCreatedAt] = struct{}{} -} - -// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. -func (m *MetaMutation) CreatedAtCleared() bool { - _, ok := m.clearedFields[meta.FieldCreatedAt] - return ok -} - // ResetCreatedAt resets all changes to the "created_at" field. func (m *MetaMutation) ResetCreatedAt() { m.created_at = nil - delete(m.clearedFields, meta.FieldCreatedAt) } // SetUpdatedAt sets the "updated_at" field. @@ -7878,7 +8148,7 @@ func (m *MetaMutation) UpdatedAt() (r time.Time, exists bool) { // OldUpdatedAt returns the old "updated_at" field's value of the Meta entity. // If the Meta object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *MetaMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *MetaMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") } @@ -7892,22 +8162,9 @@ func (m *MetaMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err erro return oldValue.UpdatedAt, nil } -// ClearUpdatedAt clears the value of the "updated_at" field. 
-func (m *MetaMutation) ClearUpdatedAt() { - m.updated_at = nil - m.clearedFields[meta.FieldUpdatedAt] = struct{}{} -} - -// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. -func (m *MetaMutation) UpdatedAtCleared() bool { - _, ok := m.clearedFields[meta.FieldUpdatedAt] - return ok -} - // ResetUpdatedAt resets all changes to the "updated_at" field. func (m *MetaMutation) ResetUpdatedAt() { m.updated_at = nil - delete(m.clearedFields, meta.FieldUpdatedAt) } // SetKey sets the "key" field. @@ -8235,12 +8492,6 @@ func (m *MetaMutation) AddField(name string, value ent.Value) error { // mutation. func (m *MetaMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(meta.FieldCreatedAt) { - fields = append(fields, meta.FieldCreatedAt) - } - if m.FieldCleared(meta.FieldUpdatedAt) { - fields = append(fields, meta.FieldUpdatedAt) - } if m.FieldCleared(meta.FieldAlertMetas) { fields = append(fields, meta.FieldAlertMetas) } @@ -8258,12 +8509,6 @@ func (m *MetaMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. func (m *MetaMutation) ClearField(name string) error { switch name { - case meta.FieldCreatedAt: - m.ClearCreatedAt() - return nil - case meta.FieldUpdatedAt: - m.ClearUpdatedAt() - return nil case meta.FieldAlertMetas: m.ClearAlertMetas() return nil @@ -8367,3 +8612,567 @@ func (m *MetaMutation) ResetEdge(name string) error { } return fmt.Errorf("unknown Meta edge %s", name) } + +// MetricMutation represents an operation that mutates the Metric nodes in the graph. 
+type MetricMutation struct { + config + op Op + typ string + id *int + generated_type *metric.GeneratedType + generated_by *string + collected_at *time.Time + pushed_at *time.Time + payload *string + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*Metric, error) + predicates []predicate.Metric +} + +var _ ent.Mutation = (*MetricMutation)(nil) + +// metricOption allows management of the mutation configuration using functional options. +type metricOption func(*MetricMutation) + +// newMetricMutation creates new mutation for the Metric entity. +func newMetricMutation(c config, op Op, opts ...metricOption) *MetricMutation { + m := &MetricMutation{ + config: c, + op: op, + typ: TypeMetric, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withMetricID sets the ID field of the mutation. +func withMetricID(id int) metricOption { + return func(m *MetricMutation) { + var ( + err error + once sync.Once + value *Metric + ) + m.oldValue = func(ctx context.Context) (*Metric, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Metric.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withMetric sets the old Metric of the mutation. +func withMetric(node *Metric) metricOption { + return func(m *MetricMutation) { + m.oldValue = func(context.Context) (*Metric, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m MetricMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. 
+func (m MetricMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *MetricMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *MetricMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Metric.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetGeneratedType sets the "generated_type" field. +func (m *MetricMutation) SetGeneratedType(mt metric.GeneratedType) { + m.generated_type = &mt +} + +// GeneratedType returns the value of the "generated_type" field in the mutation. +func (m *MetricMutation) GeneratedType() (r metric.GeneratedType, exists bool) { + v := m.generated_type + if v == nil { + return + } + return *v, true +} + +// OldGeneratedType returns the old "generated_type" field's value of the Metric entity. +// If the Metric object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *MetricMutation) OldGeneratedType(ctx context.Context) (v metric.GeneratedType, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldGeneratedType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldGeneratedType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldGeneratedType: %w", err) + } + return oldValue.GeneratedType, nil +} + +// ResetGeneratedType resets all changes to the "generated_type" field. +func (m *MetricMutation) ResetGeneratedType() { + m.generated_type = nil +} + +// SetGeneratedBy sets the "generated_by" field. +func (m *MetricMutation) SetGeneratedBy(s string) { + m.generated_by = &s +} + +// GeneratedBy returns the value of the "generated_by" field in the mutation. +func (m *MetricMutation) GeneratedBy() (r string, exists bool) { + v := m.generated_by + if v == nil { + return + } + return *v, true +} + +// OldGeneratedBy returns the old "generated_by" field's value of the Metric entity. +// If the Metric object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MetricMutation) OldGeneratedBy(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldGeneratedBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldGeneratedBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldGeneratedBy: %w", err) + } + return oldValue.GeneratedBy, nil +} + +// ResetGeneratedBy resets all changes to the "generated_by" field. +func (m *MetricMutation) ResetGeneratedBy() { + m.generated_by = nil +} + +// SetCollectedAt sets the "collected_at" field. 
+func (m *MetricMutation) SetCollectedAt(t time.Time) { + m.collected_at = &t +} + +// CollectedAt returns the value of the "collected_at" field in the mutation. +func (m *MetricMutation) CollectedAt() (r time.Time, exists bool) { + v := m.collected_at + if v == nil { + return + } + return *v, true +} + +// OldCollectedAt returns the old "collected_at" field's value of the Metric entity. +// If the Metric object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MetricMutation) OldCollectedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCollectedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCollectedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCollectedAt: %w", err) + } + return oldValue.CollectedAt, nil +} + +// ResetCollectedAt resets all changes to the "collected_at" field. +func (m *MetricMutation) ResetCollectedAt() { + m.collected_at = nil +} + +// SetPushedAt sets the "pushed_at" field. +func (m *MetricMutation) SetPushedAt(t time.Time) { + m.pushed_at = &t +} + +// PushedAt returns the value of the "pushed_at" field in the mutation. +func (m *MetricMutation) PushedAt() (r time.Time, exists bool) { + v := m.pushed_at + if v == nil { + return + } + return *v, true +} + +// OldPushedAt returns the old "pushed_at" field's value of the Metric entity. +// If the Metric object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *MetricMutation) OldPushedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPushedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPushedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPushedAt: %w", err) + } + return oldValue.PushedAt, nil +} + +// ClearPushedAt clears the value of the "pushed_at" field. +func (m *MetricMutation) ClearPushedAt() { + m.pushed_at = nil + m.clearedFields[metric.FieldPushedAt] = struct{}{} +} + +// PushedAtCleared returns if the "pushed_at" field was cleared in this mutation. +func (m *MetricMutation) PushedAtCleared() bool { + _, ok := m.clearedFields[metric.FieldPushedAt] + return ok +} + +// ResetPushedAt resets all changes to the "pushed_at" field. +func (m *MetricMutation) ResetPushedAt() { + m.pushed_at = nil + delete(m.clearedFields, metric.FieldPushedAt) +} + +// SetPayload sets the "payload" field. +func (m *MetricMutation) SetPayload(s string) { + m.payload = &s +} + +// Payload returns the value of the "payload" field in the mutation. +func (m *MetricMutation) Payload() (r string, exists bool) { + v := m.payload + if v == nil { + return + } + return *v, true +} + +// OldPayload returns the old "payload" field's value of the Metric entity. +// If the Metric object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *MetricMutation) OldPayload(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPayload is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPayload requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPayload: %w", err) + } + return oldValue.Payload, nil +} + +// ResetPayload resets all changes to the "payload" field. +func (m *MetricMutation) ResetPayload() { + m.payload = nil +} + +// Where appends a list predicates to the MetricMutation builder. +func (m *MetricMutation) Where(ps ...predicate.Metric) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the MetricMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *MetricMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Metric, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *MetricMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *MetricMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Metric). +func (m *MetricMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *MetricMutation) Fields() []string { + fields := make([]string, 0, 5) + if m.generated_type != nil { + fields = append(fields, metric.FieldGeneratedType) + } + if m.generated_by != nil { + fields = append(fields, metric.FieldGeneratedBy) + } + if m.collected_at != nil { + fields = append(fields, metric.FieldCollectedAt) + } + if m.pushed_at != nil { + fields = append(fields, metric.FieldPushedAt) + } + if m.payload != nil { + fields = append(fields, metric.FieldPayload) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *MetricMutation) Field(name string) (ent.Value, bool) { + switch name { + case metric.FieldGeneratedType: + return m.GeneratedType() + case metric.FieldGeneratedBy: + return m.GeneratedBy() + case metric.FieldCollectedAt: + return m.CollectedAt() + case metric.FieldPushedAt: + return m.PushedAt() + case metric.FieldPayload: + return m.Payload() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *MetricMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case metric.FieldGeneratedType: + return m.OldGeneratedType(ctx) + case metric.FieldGeneratedBy: + return m.OldGeneratedBy(ctx) + case metric.FieldCollectedAt: + return m.OldCollectedAt(ctx) + case metric.FieldPushedAt: + return m.OldPushedAt(ctx) + case metric.FieldPayload: + return m.OldPayload(ctx) + } + return nil, fmt.Errorf("unknown Metric field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *MetricMutation) SetField(name string, value ent.Value) error { + switch name { + case metric.FieldGeneratedType: + v, ok := value.(metric.GeneratedType) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetGeneratedType(v) + return nil + case metric.FieldGeneratedBy: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetGeneratedBy(v) + return nil + case metric.FieldCollectedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCollectedAt(v) + return nil + case metric.FieldPushedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPushedAt(v) + return nil + case metric.FieldPayload: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPayload(v) + return nil + } + return fmt.Errorf("unknown Metric field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *MetricMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *MetricMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *MetricMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Metric numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
+func (m *MetricMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(metric.FieldPushedAt) { + fields = append(fields, metric.FieldPushedAt) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *MetricMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *MetricMutation) ClearField(name string) error { + switch name { + case metric.FieldPushedAt: + m.ClearPushedAt() + return nil + } + return fmt.Errorf("unknown Metric nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *MetricMutation) ResetField(name string) error { + switch name { + case metric.FieldGeneratedType: + m.ResetGeneratedType() + return nil + case metric.FieldGeneratedBy: + m.ResetGeneratedBy() + return nil + case metric.FieldCollectedAt: + m.ResetCollectedAt() + return nil + case metric.FieldPushedAt: + m.ResetPushedAt() + return nil + case metric.FieldPayload: + m.ResetPayload() + return nil + } + return fmt.Errorf("unknown Metric field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *MetricMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *MetricMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. 
+func (m *MetricMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *MetricMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *MetricMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *MetricMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *MetricMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown Metric unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *MetricMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown Metric edge %s", name) +} diff --git a/pkg/database/ent/predicate/predicate.go b/pkg/database/ent/predicate/predicate.go index ad2e6d3f327..8ad03e2fc48 100644 --- a/pkg/database/ent/predicate/predicate.go +++ b/pkg/database/ent/predicate/predicate.go @@ -29,3 +29,6 @@ type Machine func(*sql.Selector) // Meta is the predicate function for meta builders. type Meta func(*sql.Selector) + +// Metric is the predicate function for metric builders. +type Metric func(*sql.Selector) diff --git a/pkg/database/ent/runtime.go b/pkg/database/ent/runtime.go index 87073074563..641698c9ee2 100644 --- a/pkg/database/ent/runtime.go +++ b/pkg/database/ent/runtime.go @@ -26,8 +26,6 @@ func init() { alertDescCreatedAt := alertFields[0].Descriptor() // alert.DefaultCreatedAt holds the default value on creation for the created_at field. 
alert.DefaultCreatedAt = alertDescCreatedAt.Default.(func() time.Time) - // alert.UpdateDefaultCreatedAt holds the default value on update for the created_at field. - alert.UpdateDefaultCreatedAt = alertDescCreatedAt.UpdateDefault.(func() time.Time) // alertDescUpdatedAt is the schema descriptor for updated_at field. alertDescUpdatedAt := alertFields[1].Descriptor() // alert.DefaultUpdatedAt holds the default value on creation for the updated_at field. @@ -64,8 +62,6 @@ func init() { bouncerDescCreatedAt := bouncerFields[0].Descriptor() // bouncer.DefaultCreatedAt holds the default value on creation for the created_at field. bouncer.DefaultCreatedAt = bouncerDescCreatedAt.Default.(func() time.Time) - // bouncer.UpdateDefaultCreatedAt holds the default value on update for the created_at field. - bouncer.UpdateDefaultCreatedAt = bouncerDescCreatedAt.UpdateDefault.(func() time.Time) // bouncerDescUpdatedAt is the schema descriptor for updated_at field. bouncerDescUpdatedAt := bouncerFields[1].Descriptor() // bouncer.DefaultUpdatedAt holds the default value on creation for the updated_at field. @@ -94,8 +90,6 @@ func init() { configitemDescCreatedAt := configitemFields[0].Descriptor() // configitem.DefaultCreatedAt holds the default value on creation for the created_at field. configitem.DefaultCreatedAt = configitemDescCreatedAt.Default.(func() time.Time) - // configitem.UpdateDefaultCreatedAt holds the default value on update for the created_at field. - configitem.UpdateDefaultCreatedAt = configitemDescCreatedAt.UpdateDefault.(func() time.Time) // configitemDescUpdatedAt is the schema descriptor for updated_at field. configitemDescUpdatedAt := configitemFields[1].Descriptor() // configitem.DefaultUpdatedAt holds the default value on creation for the updated_at field. @@ -108,8 +102,6 @@ func init() { decisionDescCreatedAt := decisionFields[0].Descriptor() // decision.DefaultCreatedAt holds the default value on creation for the created_at field. 
decision.DefaultCreatedAt = decisionDescCreatedAt.Default.(func() time.Time) - // decision.UpdateDefaultCreatedAt holds the default value on update for the created_at field. - decision.UpdateDefaultCreatedAt = decisionDescCreatedAt.UpdateDefault.(func() time.Time) // decisionDescUpdatedAt is the schema descriptor for updated_at field. decisionDescUpdatedAt := decisionFields[1].Descriptor() // decision.DefaultUpdatedAt holds the default value on creation for the updated_at field. @@ -126,8 +118,6 @@ func init() { eventDescCreatedAt := eventFields[0].Descriptor() // event.DefaultCreatedAt holds the default value on creation for the created_at field. event.DefaultCreatedAt = eventDescCreatedAt.Default.(func() time.Time) - // event.UpdateDefaultCreatedAt holds the default value on update for the created_at field. - event.UpdateDefaultCreatedAt = eventDescCreatedAt.UpdateDefault.(func() time.Time) // eventDescUpdatedAt is the schema descriptor for updated_at field. eventDescUpdatedAt := eventFields[1].Descriptor() // event.DefaultUpdatedAt holds the default value on creation for the updated_at field. @@ -150,8 +140,6 @@ func init() { machineDescCreatedAt := machineFields[0].Descriptor() // machine.DefaultCreatedAt holds the default value on creation for the created_at field. machine.DefaultCreatedAt = machineDescCreatedAt.Default.(func() time.Time) - // machine.UpdateDefaultCreatedAt holds the default value on update for the created_at field. - machine.UpdateDefaultCreatedAt = machineDescCreatedAt.UpdateDefault.(func() time.Time) // machineDescUpdatedAt is the schema descriptor for updated_at field. machineDescUpdatedAt := machineFields[1].Descriptor() // machine.DefaultUpdatedAt holds the default value on creation for the updated_at field. @@ -188,8 +176,6 @@ func init() { metaDescCreatedAt := metaFields[0].Descriptor() // meta.DefaultCreatedAt holds the default value on creation for the created_at field. 
meta.DefaultCreatedAt = metaDescCreatedAt.Default.(func() time.Time) - // meta.UpdateDefaultCreatedAt holds the default value on update for the created_at field. - meta.UpdateDefaultCreatedAt = metaDescCreatedAt.UpdateDefault.(func() time.Time) // metaDescUpdatedAt is the schema descriptor for updated_at field. metaDescUpdatedAt := metaFields[1].Descriptor() // meta.DefaultUpdatedAt holds the default value on creation for the updated_at field. diff --git a/pkg/database/ent/schema/alert.go b/pkg/database/ent/schema/alert.go index f2df9d7f09c..bda7cc7d0b9 100644 --- a/pkg/database/ent/schema/alert.go +++ b/pkg/database/ent/schema/alert.go @@ -19,10 +19,10 @@ func (Alert) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + Immutable(), field.Time("updated_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + UpdateDefault(types.UtcNow), field.String("scenario"), field.String("bucketId").Default("").Optional(), field.String("message").Default("").Optional(), diff --git a/pkg/database/ent/schema/bouncer.go b/pkg/database/ent/schema/bouncer.go index 986a1bf3ba8..79b558351a4 100644 --- a/pkg/database/ent/schema/bouncer.go +++ b/pkg/database/ent/schema/bouncer.go @@ -16,10 +16,10 @@ func (Bouncer) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional().StructTag(`json:"created_at"`), + StructTag(`json:"created_at"`), field.Time("updated_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional().StructTag(`json:"updated_at"`), + UpdateDefault(types.UtcNow).StructTag(`json:"updated_at"`), field.String("name").Unique().StructTag(`json:"name"`), field.String("api_key").Sensitive(), // hash of api_key field.Bool("revoked").StructTag(`json:"revoked"`), @@ -30,6 +30,9 @@ func (Bouncer) Fields() []ent.Field { field.Time("last_pull"). 
Default(types.UtcNow).StructTag(`json:"last_pull"`), field.String("auth_type").StructTag(`json:"auth_type"`).Default(types.ApiKeyAuthType), + field.String("osname").Optional(), + field.String("osversion").Optional(), + field.String("featureflags").Optional(), } } diff --git a/pkg/database/ent/schema/config.go b/pkg/database/ent/schema/config.go index f3320a9cce6..036c55908ba 100644 --- a/pkg/database/ent/schema/config.go +++ b/pkg/database/ent/schema/config.go @@ -11,21 +11,20 @@ type ConfigItem struct { ent.Schema } -// Fields of the Bouncer. func (ConfigItem) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional().StructTag(`json:"created_at"`), + Immutable(). + StructTag(`json:"created_at"`), field.Time("updated_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional().StructTag(`json:"updated_at"`), + UpdateDefault(types.UtcNow).StructTag(`json:"updated_at"`), field.String("name").Unique().StructTag(`json:"name"`), field.String("value").StructTag(`json:"value"`), // a json object } } -// Edges of the Bouncer. func (ConfigItem) Edges() []ent.Edge { return nil } diff --git a/pkg/database/ent/schema/decision.go b/pkg/database/ent/schema/decision.go index b7a99fb7a70..d5193910146 100644 --- a/pkg/database/ent/schema/decision.go +++ b/pkg/database/ent/schema/decision.go @@ -19,10 +19,10 @@ func (Decision) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + Immutable(), field.Time("updated_at"). Default(types.UtcNow). 
- UpdateDefault(types.UtcNow).Nillable().Optional(), + UpdateDefault(types.UtcNow), field.Time("until").Nillable().Optional().SchemaType(map[string]string{ dialect.MySQL: "datetime", }), diff --git a/pkg/database/ent/schema/event.go b/pkg/database/ent/schema/event.go index 6b6d2733ff7..f982ebe9653 100644 --- a/pkg/database/ent/schema/event.go +++ b/pkg/database/ent/schema/event.go @@ -18,10 +18,10 @@ func (Event) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + Immutable(), field.Time("updated_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + UpdateDefault(types.UtcNow), field.Time("time"), field.String("serialized").MaxLen(8191), field.Int("alert_events").Optional(), diff --git a/pkg/database/ent/schema/lock.go b/pkg/database/ent/schema/lock.go index de87efff3f7..0d49bac1bf6 100644 --- a/pkg/database/ent/schema/lock.go +++ b/pkg/database/ent/schema/lock.go @@ -12,7 +12,7 @@ type Lock struct { func (Lock) Fields() []ent.Field { return []ent.Field{ - field.String("name").Unique().StructTag(`json:"name"`), + field.String("name").Unique().Immutable().StructTag(`json:"name"`), field.Time("created_at").Default(types.UtcNow).StructTag(`json:"created_at"`), } } diff --git a/pkg/database/ent/schema/machine.go b/pkg/database/ent/schema/machine.go index e155c936071..7731360fcde 100644 --- a/pkg/database/ent/schema/machine.go +++ b/pkg/database/ent/schema/machine.go @@ -5,6 +5,7 @@ import ( "entgo.io/ent/schema/edge" "entgo.io/ent/schema/field" "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/crowdsecurity/crowdsec/pkg/models" ) // Machine holds the schema definition for the Machine entity. @@ -17,17 +18,19 @@ func (Machine) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + Immutable(), field.Time("updated_at"). Default(types.UtcNow). 
- UpdateDefault(types.UtcNow).Nillable().Optional(), + UpdateDefault(types.UtcNow), field.Time("last_push"). Default(types.UtcNow). UpdateDefault(types.UtcNow).Nillable().Optional(), field.Time("last_heartbeat"). Default(types.UtcNow). UpdateDefault(types.UtcNow).Nillable().Optional(), - field.String("machineId").Unique(), + field.String("machineId"). + Unique(). + Immutable(), field.String("password").Sensitive(), field.String("ipAddress"), field.String("scenarios").MaxLen(100000).Optional(), @@ -36,9 +39,23 @@ func (Machine) Fields() []ent.Field { Default(false), field.String("status").Optional(), field.String("auth_type").Default(types.PasswordAuthType).StructTag(`json:"auth_type"`), + field.String("osname").Optional(), + field.String("osversion").Optional(), + field.String("featureflags").Optional(), + field.JSON("hubstate", &models.HubItems{}).Optional(), } } +//type HubItemState struct { +// Version string `json:"version"` +// Status string `json:"status"` +//} +// +//type HubState struct { +// // the key is the FQName (type:author/name) +// Items map[string]HubItemState `json:"hub_items"` +//} + // Edges of the Machine. func (Machine) Edges() []ent.Edge { return []ent.Edge{ diff --git a/pkg/database/ent/schema/meta.go b/pkg/database/ent/schema/meta.go index 1a84bb1b667..877fffa8a2e 100644 --- a/pkg/database/ent/schema/meta.go +++ b/pkg/database/ent/schema/meta.go @@ -17,11 +17,10 @@ type Meta struct { func (Meta) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). - Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + Default(types.UtcNow), field.Time("updated_at"). Default(types.UtcNow). 
- UpdateDefault(types.UtcNow).Nillable().Optional(), + UpdateDefault(types.UtcNow), field.String("key"), field.String("value").MaxLen(4095), field.Int("alert_metas").Optional(), diff --git a/pkg/database/ent/schema/metric.go b/pkg/database/ent/schema/metric.go new file mode 100644 index 00000000000..13aaa69d045 --- /dev/null +++ b/pkg/database/ent/schema/metric.go @@ -0,0 +1,53 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// Metric is actually a set of metrics collected by a device (logprocessor, bouncer, etc) at a given time. +type Metric struct { + ent.Schema +} + + +// TODO: +// respect unique index on (generated_type, generated_by, collected_at) +// when we send, set pushed_at +// housekeeping: retention period wrt collected_at? +// do we blindly trust collected_at? refuse if too old? refuse if too much in the future? + +// Fields of the Machine. +func (Metric) Fields() []ent.Field { + return []ent.Field{ + // XXX: type tout court? + field.Enum("generated_type"). + Values("LP", "RC"). + Immutable(). + Comment("Type of the metrics source: LP=logprocessor, RC=remediation"), + field.String("generated_by"). + Immutable(). + Comment("Source of the metrics: machine id, bouncer name...\nIt must come from the auth middleware."), + field.Time("collected_at"). + Immutable(). + Comment("When the metrics are collected/calculated at the source"), + field.Time("pushed_at"). + Nillable(). + Optional(). + Comment("When the metrics are sent to the console"), + // Can we have a json/jsonbb field? with two different schemas? + field.String("payload"). + Immutable(). + Comment("The actual metrics (item0)"), + } +} + +func (Metric) Indexes() []ent.Index { + return []ent.Index{ + // Don't store the same metrics multiple times. + index.Fields("generated_type", "generated_by", "collected_at"). + Unique(), + } + // XXX: we happy with the generated index name? 
+} diff --git a/pkg/database/ent/tx.go b/pkg/database/ent/tx.go index 27b39c12502..bf8221ce4a5 100644 --- a/pkg/database/ent/tx.go +++ b/pkg/database/ent/tx.go @@ -28,6 +28,8 @@ type Tx struct { Machine *MachineClient // Meta is the client for interacting with the Meta builders. Meta *MetaClient + // Metric is the client for interacting with the Metric builders. + Metric *MetricClient // lazily loaded. client *Client @@ -167,6 +169,7 @@ func (tx *Tx) init() { tx.Lock = NewLockClient(tx.config) tx.Machine = NewMachineClient(tx.config) tx.Meta = NewMetaClient(tx.config) + tx.Metric = NewMetricClient(tx.config) } // txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. diff --git a/pkg/database/flush.go b/pkg/database/flush.go index a7b364fa970..4828ebac907 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -85,6 +85,8 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched baJob.SingletonMode() scheduler.StartAsync() + // TODO: flush metrics here (MetricsMaxAge) + return scheduler, nil } diff --git a/pkg/database/machines.go b/pkg/database/machines.go index b9834e57e09..ac95e9c2eaf 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -2,12 +2,14 @@ package database import ( "fmt" + "strings" "time" "github.com/go-openapi/strfmt" "github.com/pkg/errors" "golang.org/x/crypto/bcrypt" + "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -16,6 +18,29 @@ import ( const CapiMachineID = types.CAPIOrigin const CapiListsMachineID = types.ListOrigin +func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics *models.BaseMetrics, hubItems *models.HubItems) error { + os := baseMetrics.Os + features := strings.Join(baseMetrics.FeatureFlags, ",") + + heartbeat := time.Unix(int64(baseMetrics.Meta.UtcNowTimestamp), 0) + + _, err 
:= c.Ent.Machine. + Update(). + Where(machine.MachineIdEQ(machineID)). + SetNillableVersion(baseMetrics.Version). + SetOsname(os.Name). + SetOsversion(os.Version). + SetFeatureflags(features). + SetLastHeartbeat(heartbeat). + SetHubstate(hubItems). + // TODO: update scenarios + Save(c.CTX) + if err != nil { + return fmt.Errorf("unable to update base machine metrics in database: %s", err) + } + return nil +} + func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipAddress string, isValidated bool, force bool, authType string) (*ent.Machine, error) { hashPassword, err := bcrypt.GenerateFromPassword([]byte(*password), bcrypt.DefaultCost) if err != nil { diff --git a/pkg/database/metrics.go b/pkg/database/metrics.go new file mode 100644 index 00000000000..4a75f2c32e8 --- /dev/null +++ b/pkg/database/metrics.go @@ -0,0 +1,35 @@ +package database + +import ( + "time" + + "github.com/pkg/errors" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" +) + +// TODO: +// what if they are alrady in the db (should get an error from the unique index) +// CollectMetricsToPush (count limit? including stale?) +// SetPushedMetrics +// RemoveOldMetrics +// avoid errors.Wrapf + + +func (c *Client) CreateMetric(generatedType metric.GeneratedType, generatedBy string, collectedAt time.Time, payload string) (*ent.Metric, error) { + metric, err := c.Ent.Metric. + Create(). + SetGeneratedType(generatedType). + SetGeneratedBy(generatedBy). + SetCollectedAt(collectedAt). + SetPayload(payload). 
+ Save(c.CTX) + + if err != nil { + c.Log.Warningf("CreateMetric: %s", err) + return nil, errors.Wrapf(InsertFail, "creating metrics set for '%s' at %s", generatedBy, collectedAt) + } + + return metric, nil +} diff --git a/pkg/models/hub_item.go b/pkg/models/hub_item.go new file mode 100644 index 00000000000..eac2cd75fe9 --- /dev/null +++ b/pkg/models/hub_item.go @@ -0,0 +1,53 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// HubItem HubItem +// +// swagger:model HubItem +type HubItem struct { + + // status of the hub item (official, custom, tainted, etc.) + Status string `json:"status,omitempty"` + + // version of the hub item + Version string `json:"version,omitempty"` +} + +// Validate validates this hub item +func (m *HubItem) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this hub item based on context it is used +func (m *HubItem) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *HubItem) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HubItem) UnmarshalBinary(b []byte) error { + var res HubItem + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/hub_items.go b/pkg/models/hub_items.go new file mode 100644 index 00000000000..78b9d90c082 --- /dev/null +++ b/pkg/models/hub_items.go @@ -0,0 +1,67 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// HubItems HubItems +// +// swagger:model HubItems +type HubItems map[string]HubItem + +// Validate validates this hub items +func (m HubItems) Validate(formats strfmt.Registry) error { + var res []error + + for k := range m { + + if err := validate.Required(k, "body", m[k]); err != nil { + return err + } + if val, ok := m[k]; ok { + if err := val.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(k) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(k) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this hub items based on the context it is used +func (m HubItems) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for k := range m { + + if val, ok := m[k]; ok { + if err := val.ContextValidate(ctx, formats); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index 528463e4502..1d99389c311 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -1047,6 +1047,8 @@ definitions: properties: console_options: $ref: '#/definitions/ConsoleOptions' + hub_items: + $ref: '#/definitions/HubItems' datasources: type: object description: Number of datasources per type @@ -1129,10 +1131,10 @@ definitions: type: integer description: Size, in seconds, of the window used to compute the metric utc_startup_timestamp: - type: number + type: integer description: UTC timestamp of the startup of the software utc_now_timestamp: - type: number + type: integer description: UTC timestamp of the current time MetricsLabels: title: MetricsLabels @@ -1146,6 +1148,21 @@ definitions: items: type: string description: enabled console options + HubItems: + title: HubItems + type: object + additionalProperties: + $ref: '#/definitions/HubItem' + HubItem: + title: HubItem + type: object + properties: + version: + type: string + description: version of the hub item + status: + type: string + description: status of the hub item (official, custom, tainted, etc.) 
ErrorResponse: type: "object" required: diff --git a/pkg/models/log_processors_metrics.go b/pkg/models/log_processors_metrics.go index 10b6491d28a..197c215a451 100644 --- a/pkg/models/log_processors_metrics.go +++ b/pkg/models/log_processors_metrics.go @@ -95,6 +95,9 @@ type LogProcessorsMetricsItems0 struct { // Number of datasources per type Datasources map[string]int64 `json:"datasources,omitempty"` + + // hub items + HubItems HubItems `json:"hub_items,omitempty"` } // UnmarshalJSON unmarshals this object from a JSON structure @@ -111,6 +114,8 @@ func (m *LogProcessorsMetricsItems0) UnmarshalJSON(raw []byte) error { ConsoleOptions ConsoleOptions `json:"console_options,omitempty"` Datasources map[string]int64 `json:"datasources,omitempty"` + + HubItems HubItems `json:"hub_items,omitempty"` } if err := swag.ReadJSON(raw, &dataAO1); err != nil { return err @@ -120,6 +125,8 @@ func (m *LogProcessorsMetricsItems0) UnmarshalJSON(raw []byte) error { m.Datasources = dataAO1.Datasources + m.HubItems = dataAO1.HubItems + return nil } @@ -136,12 +143,16 @@ func (m LogProcessorsMetricsItems0) MarshalJSON() ([]byte, error) { ConsoleOptions ConsoleOptions `json:"console_options,omitempty"` Datasources map[string]int64 `json:"datasources,omitempty"` + + HubItems HubItems `json:"hub_items,omitempty"` } dataAO1.ConsoleOptions = m.ConsoleOptions dataAO1.Datasources = m.Datasources + dataAO1.HubItems = m.HubItems + jsonDataAO1, errAO1 := swag.WriteJSON(dataAO1) if errAO1 != nil { return nil, errAO1 @@ -163,6 +174,10 @@ func (m *LogProcessorsMetricsItems0) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validateHubItems(formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { return errors.CompositeValidationError(res...) 
} @@ -187,6 +202,26 @@ func (m *LogProcessorsMetricsItems0) validateConsoleOptions(formats strfmt.Regis return nil } +func (m *LogProcessorsMetricsItems0) validateHubItems(formats strfmt.Registry) error { + + if swag.IsZero(m.HubItems) { // not required + return nil + } + + if m.HubItems != nil { + if err := m.HubItems.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("hub_items") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("hub_items") + } + return err + } + } + + return nil +} + // ContextValidate validate this log processors metrics items0 based on the context it is used func (m *LogProcessorsMetricsItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { var res []error @@ -200,6 +235,10 @@ func (m *LogProcessorsMetricsItems0) ContextValidate(ctx context.Context, format res = append(res, err) } + if err := m.contextValidateHubItems(ctx, formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { return errors.CompositeValidationError(res...) 
} @@ -220,6 +259,24 @@ func (m *LogProcessorsMetricsItems0) contextValidateConsoleOptions(ctx context.C return nil } +func (m *LogProcessorsMetricsItems0) contextValidateHubItems(ctx context.Context, formats strfmt.Registry) error { + + if swag.IsZero(m.HubItems) { // not required + return nil + } + + if err := m.HubItems.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("hub_items") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("hub_items") + } + return err + } + + return nil +} + // MarshalBinary interface implementation func (m *LogProcessorsMetricsItems0) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/pkg/models/metrics_meta.go b/pkg/models/metrics_meta.go index 9320fb79489..30bccca0e7c 100644 --- a/pkg/models/metrics_meta.go +++ b/pkg/models/metrics_meta.go @@ -18,10 +18,10 @@ import ( type MetricsMeta struct { // UTC timestamp of the current time - UtcNowTimestamp float64 `json:"utc_now_timestamp,omitempty"` + UtcNowTimestamp int64 `json:"utc_now_timestamp,omitempty"` // UTC timestamp of the startup of the software - UtcStartupTimestamp float64 `json:"utc_startup_timestamp,omitempty"` + UtcStartupTimestamp int64 `json:"utc_startup_timestamp,omitempty"` // Size, in seconds, of the window used to compute the metric WindowSizeSeconds int64 `json:"window_size_seconds,omitempty"` diff --git a/test/bats/11_bouncers_tls.bats b/test/bats/11_bouncers_tls.bats index 2c39aae3079..e39d691c1b6 100644 --- a/test/bats/11_bouncers_tls.bats +++ b/test/bats/11_bouncers_tls.bats @@ -66,7 +66,7 @@ teardown() { } @test "simulate one bouncer request with a valid cert" { - rune -0 curl -s --cert "${tmpdir}/bouncer.pem" --key "${tmpdir}/bouncer-key.pem" --cacert "${tmpdir}/bundle.pem" https://localhost:8080/v1/decisions\?ip=42.42.42.42 + rune -0 curl -f -s --cert "${tmpdir}/bouncer.pem" --key "${tmpdir}/bouncer-key.pem" --cacert "${tmpdir}/bundle.pem" 
https://localhost:8080/v1/decisions\?ip=42.42.42.42 assert_output "null" rune -0 cscli bouncers list -o json rune -0 jq '. | length' <(output) diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index 2a04cc9bc20..f9f87a56de9 100644 --- a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -104,3 +104,40 @@ teardown() { rune -0 cscli machines prune assert_output 'No machines to prune.' } + +@test "usage metrics" { + # a registered log processor can send metrics for the console + token=$(lp_login) + usage_metrics="http://localhost:8080/v1/usage-metrics" + + payload=$(cat <<-EOT + remediation_components: [] + log_processors: + - + - version: "v1.0" + feature_flags: + - marshmallows + meta: + window_size_seconds: 600 + utc_startup_timestamp: 1707399316 + utc_now_timestamp: 1707485349 + os: + name: CentOS + version: "8" + metrics: + - name: logs_parsed + value: 5000 + unit: count + labels: {} + console_options: + - share_context + datasources: + syslog: 1 + file: 4 + EOT + ) + + echo -e "$payload" >/tmp/bbb + rune -0 curl -f -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" + refute_output +} diff --git a/test/bats/97_ipv4_single.bats b/test/bats/97_ipv4_single.bats index 1ada1c4646b..8ba6ccee29d 100644 --- a/test/bats/97_ipv4_single.bats +++ b/test/bats/97_ipv4_single.bats @@ -24,7 +24,7 @@ setup() { api() { URI="$1" - curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" + curl -f -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" } #---------- diff --git a/test/bats/97_ipv6_single.bats b/test/bats/97_ipv6_single.bats index 982976d70ed..7b3c16f7580 100644 --- a/test/bats/97_ipv6_single.bats +++ b/test/bats/97_ipv6_single.bats @@ -24,7 +24,7 @@ setup() { api() { URI="$1" - curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" + curl -f -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" } #---------- diff --git a/test/bats/98_ipv4_range.bats 
b/test/bats/98_ipv4_range.bats index b0f6f482944..cf7f40c6d90 100644 --- a/test/bats/98_ipv4_range.bats +++ b/test/bats/98_ipv4_range.bats @@ -24,7 +24,7 @@ setup() { api() { URI="$1" - curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" + curl -f -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" } #---------- diff --git a/test/bats/98_ipv6_range.bats b/test/bats/98_ipv6_range.bats index d3c347583da..052a9c094a4 100644 --- a/test/bats/98_ipv6_range.bats +++ b/test/bats/98_ipv6_range.bats @@ -24,7 +24,7 @@ setup() { api() { URI="$1" - curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" + curl -f -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" } #---------- diff --git a/test/bats/99_lapi-stream-mode-scenario.bats b/test/bats/99_lapi-stream-mode-scenario.bats index 9b4d562f3c9..8616be59b72 100644 --- a/test/bats/99_lapi-stream-mode-scenario.bats +++ b/test/bats/99_lapi-stream-mode-scenario.bats @@ -26,7 +26,7 @@ setup() { api() { URI="$1" - curl -s -H "X-Api-Key:${API_KEY}" "${CROWDSEC_API_URL}${URI}" + curl -f -s -H "X-Api-Key:${API_KEY}" "${CROWDSEC_API_URL}${URI}" } output_new_decisions() { diff --git a/test/bats/99_lapi-stream-mode-scopes.bats b/test/bats/99_lapi-stream-mode-scopes.bats index a1d01c489e6..63a8f0d895e 100644 --- a/test/bats/99_lapi-stream-mode-scopes.bats +++ b/test/bats/99_lapi-stream-mode-scopes.bats @@ -25,7 +25,7 @@ setup() { api() { URI="$1" - curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" + curl -f -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" } @test "adding decisions for multiple scopes" { diff --git a/test/bats/99_lapi-stream-mode.bats b/test/bats/99_lapi-stream-mode.bats index 08ddde42c5f..070fa71ef01 100644 --- a/test/bats/99_lapi-stream-mode.bats +++ b/test/bats/99_lapi-stream-mode.bats @@ -25,7 +25,7 @@ setup() { api() { URI="$1" - curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" + curl -f -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" } @test 
"adding decisions for multiple ips" { diff --git a/test/lib/setup_file.sh b/test/lib/setup_file.sh index 1aca32fa6d0..50df2695a64 100755 --- a/test/lib/setup_file.sh +++ b/test/lib/setup_file.sh @@ -276,3 +276,19 @@ rune() { run --separate-stderr "$@" } export -f rune + +# as a log processor, connect to lapi and get a token +lp_login() { + local cred + cred=$(config_get .api.client.credentials_path) + local url + url="$(yq '.url' < "$cred")/v1/watchers/login" + local resp + resp=$(yq -oj -I0 '{"machine_id":.login,"password":.password}' < "$cred" | curl -s -X POST "$url" --data-binary @-) + if [[ "$(yq -e '.code' <<<"$resp")" != 200 ]]; then + echo "login_lp: failed to login" >&3 + return 1 + fi + echo "$resp" | yq -r '.token' +} +export -f lp_login From d9a3819ef59bf92fe9fa9fc6111b34d67b079c2f Mon Sep 17 00:00:00 2001 From: marco Date: Fri, 8 Mar 2024 13:35:10 +0100 Subject: [PATCH 004/119] ignore duplicate data points --- pkg/apiserver/controllers/v1/usagemetrics.go | 6 ++- pkg/csconfig/crowdsec_service.go | 10 ++--- pkg/database/flush.go | 43 ++++++++++++++++++-- pkg/database/metrics.go | 13 ++++-- 4 files changed, 57 insertions(+), 15 deletions(-) diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go index 8fda148cee4..f77239e56c6 100644 --- a/pkg/apiserver/controllers/v1/usagemetrics.go +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -131,11 +131,13 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { } if _, err := c.DBClient.CreateMetric(generatedType, generatedBy, collectedAt, string(jsonPayload)); err != nil { - log.Errorf("Failed to store usage metrics: %s", err) + log.Error(err) c.HandleDBErrors(gctx, err) return } - // empty body + // if CreateMetrics() returned nil, the metric was already there, we're good + // and don't split hair about 201 vs 200/204 + gctx.Status(http.StatusCreated) } diff --git a/pkg/csconfig/crowdsec_service.go b/pkg/csconfig/crowdsec_service.go index 
f4103293f0d..c54b850f622 100644 --- a/pkg/csconfig/crowdsec_service.go +++ b/pkg/csconfig/crowdsec_service.go @@ -12,6 +12,11 @@ import ( "github.com/crowdsecurity/go-cs-lib/ptr" ) +const ( + defaultMetricsInterval = 30 * time.Minute + minimumMetricsInterval = 15 * time.Minute +) + // CrowdsecServiceCfg contains the location of parsers/scenarios/... and acquisition files type CrowdsecServiceCfg struct { Enable *bool `yaml:"enable"` @@ -143,11 +148,6 @@ func (c *Config) LoadCrowdsec() error { return nil } -const ( - defaultMetricsInterval = 30 * time.Second - minimumMetricsInterval = 15 * time.Second -) - func (c *CrowdsecServiceCfg) setMetricsInterval() { switch { case c.MetricsInterval == nil: diff --git a/pkg/database/flush.go b/pkg/database/flush.go index 4828ebac907..73c9adb9d48 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -7,15 +7,24 @@ import ( "github.com/go-co-op/gocron" log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" "github.com/crowdsecurity/crowdsec/pkg/types" ) +const ( + // how long to keep metrics in the local database + defaultMetricsMaxAge = 7 * 24 * time.Hour + flushInterval = 1 * time.Minute +) + func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Scheduler, error) { maxItems := 0 @@ -32,7 +41,7 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched // Init & Start cronjob every minute for alerts scheduler := gocron.NewScheduler(time.UTC) - job, err := scheduler.Every(1).Minute().Do(c.FlushAlerts, maxAge, maxItems) + job, err := 
scheduler.Every(flushInterval).Do(c.FlushAlerts, maxAge, maxItems) if err != nil { return nil, fmt.Errorf("while starting FlushAlerts scheduler: %w", err) } @@ -77,19 +86,45 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched log.Warning("bouncers auto-delete for login/password auth is not supported (use cert or api)") } } - baJob, err := scheduler.Every(1).Minute().Do(c.FlushAgentsAndBouncers, config.AgentsGC, config.BouncersGC) + baJob, err := scheduler.Every(flushInterval).Do(c.FlushAgentsAndBouncers, config.AgentsGC, config.BouncersGC) if err != nil { return nil, fmt.Errorf("while starting FlushAgentsAndBouncers scheduler: %w", err) } baJob.SingletonMode() - scheduler.StartAsync() - // TODO: flush metrics here (MetricsMaxAge) + metricsJob, err := scheduler.Every(flushInterval).Do(c.flushMetrics, config.MetricsMaxAge) + if err != nil { + return nil, fmt.Errorf("while starting flushMetrics scheduler: %w", err) + } + + metricsJob.SingletonMode() + + scheduler.StartAsync() return scheduler, nil } +// flushMetrics deletes metrics older than maxAge, regardless if they have been pushed to CAPI or not +func (c *Client) flushMetrics(maxAge *time.Duration) { + if maxAge == nil { + maxAge = ptr.Of(defaultMetricsMaxAge) + } + + c.Log.Debugf("flushing metrics older than %s", maxAge) + + deleted, err := c.Ent.Metric.Delete().Where( + metric.CollectedAtLTE(time.Now().UTC().Add(-*maxAge)), + ).Exec(c.CTX) + if err != nil { + c.Log.Errorf("while flushing metrics: %s", err) + return + } + + if deleted > 0 { + c.Log.Debugf("flushed %d metrics snapshots", deleted) + } +} func (c *Client) FlushOrphans() { /* While it has only been linked to some very corner-case bug : https://github.com/crowdsecurity/crowdsec/issues/778 */ diff --git a/pkg/database/metrics.go b/pkg/database/metrics.go index 4a75f2c32e8..bd525449741 100644 --- a/pkg/database/metrics.go +++ b/pkg/database/metrics.go @@ -1,10 +1,9 @@ package database import ( + "fmt" "time" - 
"github.com/pkg/errors" - "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" ) @@ -26,9 +25,15 @@ func (c *Client) CreateMetric(generatedType metric.GeneratedType, generatedBy st SetPayload(payload). Save(c.CTX) - if err != nil { + switch { + case ent.IsConstraintError(err): + // pretty safe guess, it's the unique index + c.Log.Infof("storing metrics snapshot for '%s' at %s: already exists", generatedBy, collectedAt) + // it's polite to accept a duplicate snapshot without any error + return nil, nil + case err != nil: c.Log.Warningf("CreateMetric: %s", err) - return nil, errors.Wrapf(InsertFail, "creating metrics set for '%s' at %s", generatedBy, collectedAt) + return nil, fmt.Errorf("storing metrics snapshot for '%s' at %s: %w", generatedBy, collectedAt, InsertFail) } return metric, nil From 876c33945d92c2a48f01d0c3629fc312c5a30694 Mon Sep 17 00:00:00 2001 From: marco Date: Fri, 8 Mar 2024 14:37:48 +0100 Subject: [PATCH 005/119] lint --- pkg/database/machines.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/database/machines.go b/pkg/database/machines.go index ac95e9c2eaf..9f0495d3237 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -22,7 +22,7 @@ func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics *models. os := baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") - heartbeat := time.Unix(int64(baseMetrics.Meta.UtcNowTimestamp), 0) + heartbeat := time.Unix(baseMetrics.Meta.UtcNowTimestamp, 0) _, err := c.Ent.Machine. Update(). 
From b00e553ce934cf839c65bf3eff11bf6f9e1e0988 Mon Sep 17 00:00:00 2001 From: marco Date: Mon, 11 Mar 2024 16:05:28 +0100 Subject: [PATCH 006/119] interval messages --- pkg/csconfig/crowdsec_service.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/csconfig/crowdsec_service.go b/pkg/csconfig/crowdsec_service.go index c54b850f622..3acb53763e5 100644 --- a/pkg/csconfig/crowdsec_service.go +++ b/pkg/csconfig/crowdsec_service.go @@ -151,13 +151,13 @@ func (c *Config) LoadCrowdsec() error { func (c *CrowdsecServiceCfg) setMetricsInterval() { switch { case c.MetricsInterval == nil: - c.MetricsInterval = ptr.Of(defaultMetricsInterval) log.Tracef("metrics_interval is not set, default to %s", defaultMetricsInterval) + c.MetricsInterval = ptr.Of(defaultMetricsInterval) case *c.MetricsInterval == time.Duration(0): log.Info("metrics_interval is set to 0, disabling metrics") case *c.MetricsInterval < minimumMetricsInterval: + log.Warnf("metrics_interval is too low (%s), setting it to %s", *c.MetricsInterval, minimumMetricsInterval) c.MetricsInterval = ptr.Of(minimumMetricsInterval) - log.Warnf("metrics_interval is too low, setting it to %s", minimumMetricsInterval) default: log.Tracef("metrics_interval set to %s", c.MetricsInterval) } From 95f38d97d85b81239abde7159077208d7ac6132c Mon Sep 17 00:00:00 2001 From: marco Date: Mon, 11 Mar 2024 16:13:30 +0100 Subject: [PATCH 007/119] explicit message for 404 --- cmd/crowdsec/lpmetrics.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmd/crowdsec/lpmetrics.go b/cmd/crowdsec/lpmetrics.go index 2d43a4a4462..08961434446 100644 --- a/cmd/crowdsec/lpmetrics.go +++ b/cmd/crowdsec/lpmetrics.go @@ -164,6 +164,9 @@ func (m *MetricsProvider) Run(ctx context.Context, myTomb *tomb.Tomb) error { case errors.Is(err, context.DeadlineExceeded): m.logger.Warnf("timeout sending lp metrics") continue + case err != nil && resp != nil && resp.Response.StatusCode == http.StatusNotFound: + m.logger.Warnf("metrics endpoint 
not found, older LAPI?") + continue case err != nil: m.logger.Warnf("failed to send lp metrics: %s", err) continue From 11499c8d9e4a4e493e33d67e509e1b188cbbf360 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Thu, 21 Mar 2024 10:27:37 +0100 Subject: [PATCH 008/119] display more LP information in cscli --- cmd/crowdsec-cli/machines_table.go | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/cmd/crowdsec-cli/machines_table.go b/cmd/crowdsec-cli/machines_table.go index 120929ea654..933d597e250 100644 --- a/cmd/crowdsec-cli/machines_table.go +++ b/cmd/crowdsec-cli/machines_table.go @@ -1,6 +1,7 @@ package main import ( + "fmt" "io" "time" @@ -10,11 +11,20 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/emoji" ) +var tableHeaders = []string{"Name", "IP Address", "Last Update", "Status", "Version", "OS", "Auth Type", "Feature Flags", "Last Heartbeat"} + func getAgentsTable(out io.Writer, machines []*ent.Machine) { t := newLightTable(out) - t.SetHeaders("Name", "IP Address", "Last Update", "Status", "Version", "Auth Type", "Last Heartbeat") - t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetHeaders(tableHeaders...) + + alignment := []table.Alignment{} + + for i := 0; i < len(tableHeaders); i++ { + alignment = append(alignment, table.AlignLeft) + } + + t.SetHeaderAlignment(alignment...) + t.SetAlignment(alignment...) 
for _, m := range machines { validated := emoji.Prohibited @@ -27,7 +37,7 @@ func getAgentsTable(out io.Writer, machines []*ent.Machine) { hb = emoji.Warning + " " + hb } - t.AddRow(m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb) + t.AddRow(m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, fmt.Sprintf("%s/%s", m.Osname, m.Osversion), m.AuthType, m.Featureflags, hb) } t.Render() From dbdf3ad1bbf388e935df89f4793cf029ae3fa582 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Thu, 21 Mar 2024 10:27:52 +0100 Subject: [PATCH 009/119] wip --- cmd/crowdsec/lpmetrics.go | 73 +++++++++++++++-------------- pkg/apiserver/apic_metrics.go | 88 ++++++++++++++++++++++++++++++++++- pkg/apiserver/apiserver.go | 12 ++++- pkg/database/metrics.go | 33 ++++++++++++- pkg/fflag/crowdsec.go | 6 +++ 5 files changed, 173 insertions(+), 39 deletions(-) diff --git a/cmd/crowdsec/lpmetrics.go b/cmd/crowdsec/lpmetrics.go index 08961434446..19e0ac79882 100644 --- a/cmd/crowdsec/lpmetrics.go +++ b/cmd/crowdsec/lpmetrics.go @@ -4,39 +4,40 @@ import ( "context" "errors" "net/http" - "github.com/sirupsen/logrus" - "github.com/blackfireio/osinfo" "time" + "github.com/blackfireio/osinfo" + "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" - "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/crowdsecurity/go-cs-lib/trace" - "github.com/crowdsecurity/crowdsec/pkg/acquisition" - "github.com/crowdsecurity/crowdsec/pkg/apiclient" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" - "github.com/crowdsecurity/crowdsec/pkg/fflag" - "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/acquisition" + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + 
"github.com/crowdsecurity/crowdsec/pkg/fflag" + "github.com/crowdsecurity/crowdsec/pkg/models" ) // MetricsProvider collects metrics from the LP and sends them to the LAPI type MetricsProvider struct { - apic *apiclient.ApiClient + apic *apiclient.ApiClient interval time.Duration - static staticMetrics - logger *logrus.Entry + static staticMetrics + logger *logrus.Entry } type staticMetrics struct { - osName string - osVersion string - startupTS int64 - featureFlags []string - consoleOptions []string - datasourceMap map[string]int64 - hubState models.HubItems + osName string + osVersion string + startupTS int64 + featureFlags []string + consoleOptions []string + datasourceMap map[string]int64 + hubState models.HubItems } func getHubState(hub *cwhub.Hub) models.HubItems { @@ -96,50 +97,48 @@ func detectOS() (string, string) { return osInfo.Name, osInfo.Version } - func NewMetricsProvider(apic *apiclient.ApiClient, interval time.Duration, logger *logrus.Entry, - consoleOptions []string, datasources []acquisition.DataSource, hub *cwhub.Hub) *MetricsProvider { + consoleOptions []string, datasources []acquisition.DataSource, hub *cwhub.Hub) *MetricsProvider { return &MetricsProvider{ - apic: apic, + apic: apic, interval: interval, - logger: logger, - static: newStaticMetrics(consoleOptions, datasources, hub), + logger: logger, + static: newStaticMetrics(consoleOptions, datasources, hub), } } func (m *MetricsProvider) metricsPayload() *models.AllMetrics { meta := &models.MetricsMeta{ UtcStartupTimestamp: m.static.startupTS, - WindowSizeSeconds: int64(m.interval.Seconds()), + WindowSizeSeconds: int64(m.interval.Seconds()), } os := &models.OSversion{ - Name: m.static.osName, + Name: m.static.osName, Version: m.static.osVersion, } base := models.BaseMetrics{ - Meta: meta, - Os: os, - Version: ptr.Of(cwversion.VersionStr()), + Meta: meta, + Os: os, + Version: ptr.Of(cwversion.VersionStr()), FeatureFlags: m.static.featureFlags, } item0 := &models.LogProcessorsMetricsItems0{ - 
BaseMetrics: base, + BaseMetrics: base, ConsoleOptions: m.static.consoleOptions, - Datasources: m.static.datasourceMap, - HubItems: m.static.hubState, + Datasources: m.static.datasourceMap, + HubItems: m.static.hubState, } // TODO: more metric details... ? return &models.AllMetrics{ - LogProcessors: []models.LogProcessorsMetrics{{item0}}, + LogProcessors: []models.LogProcessorsMetrics{{item0}}, } } - func (m *MetricsProvider) Run(ctx context.Context, myTomb *tomb.Tomb) error { defer trace.CatchPanic("crowdsec/MetricsProvider.Run") @@ -149,7 +148,7 @@ func (m *MetricsProvider) Run(ctx context.Context, myTomb *tomb.Tomb) error { met := m.metricsPayload() - ticker := time.NewTicker(m.interval) + ticker := time.NewTicker(1) //Send on start for { select { @@ -177,6 +176,8 @@ func (m *MetricsProvider) Run(ctx context.Context, myTomb *tomb.Tomb) error { continue } + ticker.Reset(m.interval) + m.logger.Tracef("lp usage metrics sent") case <-myTomb.Dying(): ticker.Stop() diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 128ce5a9639..75696604dab 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -2,11 +2,15 @@ package apiserver import ( "context" + "encoding/json" + "strings" "time" - log "github.com/sirupsen/logrus" "slices" + "github.com/davecgh/go-spew/spew" + log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/go-cs-lib/version" @@ -14,6 +18,67 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) +func (a *apic) GetUsageMetrics() (*models.AllMetrics, error) { + lpsMetrics, err := a.dbClient.GetLPsUsageMetrics() + + if err != nil { + return nil, err + } + + spew.Dump(lpsMetrics) + + bouncersMetrics, err := a.dbClient.GetBouncersUsageMetrics() + if err != nil { + return nil, err + } + + spew.Dump(bouncersMetrics) + + allMetrics := &models.AllMetrics{} + + allLps := a.dbClient.ListMachines() + allBouncers := 
a.dbClient.ListBouncers() + + for _, lpsMetric := range lpsMetrics { + lpName := lpsMetric.GeneratedBy + metrics := models.LogProcessorsMetricsItems0{} + + err := json.Unmarshal([]byte(lpsMetric.Payload), &metrics) + + if err != nil { + log.Errorf("unable to unmarshal LPs metrics (%s)", err) + continue + } + + lp, err := a.dbClient.QueryMachineByID(lpName) + + if err != nil { + log.Errorf("unable to get LP information for %s: %s", lpName, err) + continue + } + + if lp.Hubstate != nil { + metrics.HubItems = *lp.Hubstate + } + + metrics.Os = &models.OSversion{ + Name: lp.Osname, + Version: lp.Osversion, + } + + metrics.FeatureFlags = strings.Split(lp.Featureflags, ",") + metrics.Version = &lp.Version + //TODO: meta + + } + + //bouncerInfos := make(map[string]string) + + //TODO: add LAPI metrics + + return allMetrics, nil +} + func (a *apic) GetMetrics() (*models.Metrics, error) { machines, err := a.dbClient.ListMachines() if err != nil { @@ -160,3 +225,24 @@ func (a *apic) SendMetrics(stop chan (bool)) { } } } + +func (a *apic) SendUsageMetrics() { + defer trace.CatchPanic("lapi/usageMetricsToAPIC") + + ticker := time.NewTicker(5 * time.Second) + + for { + select { + case <-a.metricsTomb.Dying(): + //The normal metrics routine also kills push/pull tombs, does that make sense ? 
+ ticker.Stop() + return + case <-ticker.C: + _, err := a.GetUsageMetrics() + if err != nil { + log.Errorf("unable to get usage metrics (%s)", err) + } + + } + } +} diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 7989cfc1d97..0c34f5a8587 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -25,6 +25,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/fflag" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -360,6 +361,15 @@ func (s *APIServer) Run(apiReady chan bool) error { s.apic.SendMetrics(make(chan bool)) return nil }) + + if fflag.CAPIUsageMetrics.IsEnabled() { + log.Infof("CAPI_USAGE_METRICS flag is enabled, starting usage metrics routine") + s.apic.metricsTomb.Go(func() error { + s.apic.SendUsageMetrics() + return nil + }) + } + } s.httpServerTomb.Go(func() error { @@ -368,7 +378,7 @@ func (s *APIServer) Run(apiReady chan bool) error { if err := s.httpServerTomb.Wait(); err != nil { return fmt.Errorf("local API server stopped with error: %w", err) - } + } return nil } diff --git a/pkg/database/metrics.go b/pkg/database/metrics.go index bd525449741..22f699a7866 100644 --- a/pkg/database/metrics.go +++ b/pkg/database/metrics.go @@ -15,7 +15,6 @@ import ( // RemoveOldMetrics // avoid errors.Wrapf - func (c *Client) CreateMetric(generatedType metric.GeneratedType, generatedBy string, collectedAt time.Time, payload string) (*ent.Metric, error) { metric, err := c.Ent.Metric. Create(). @@ -38,3 +37,35 @@ func (c *Client) CreateMetric(generatedType metric.GeneratedType, generatedBy st return metric, nil } + +func (c *Client) GetLPsUsageMetrics() ([]*ent.Metric, error) { + metrics, err := c.Ent.Metric.Query(). + Where( + metric.GeneratedTypeEQ(metric.GeneratedTypeLP), + metric.PushedAtIsNil(), + ). + Order(ent.Desc(metric.FieldCollectedAt)). 
+ All(c.CTX) + if err != nil { + c.Log.Warningf("GetLPsUsageMetrics: %s", err) + return nil, fmt.Errorf("getting LPs usage metrics: %w", err) + } + + return metrics, nil +} + +func (c *Client) GetBouncersUsageMetrics() ([]*ent.Metric, error) { + metrics, err := c.Ent.Metric.Query(). + Where( + metric.GeneratedTypeEQ(metric.GeneratedTypeRC), + metric.PushedAtIsNil(), + ). + Order(ent.Desc(metric.FieldCollectedAt)). + All(c.CTX) + if err != nil { + c.Log.Warningf("GetBouncersUsageMetrics: %s", err) + return nil, fmt.Errorf("getting bouncers usage metrics: %w", err) + } + + return metrics, nil +} diff --git a/pkg/fflag/crowdsec.go b/pkg/fflag/crowdsec.go index d42d6a05ef6..e5c80a19be2 100644 --- a/pkg/fflag/crowdsec.go +++ b/pkg/fflag/crowdsec.go @@ -8,6 +8,7 @@ var ChunkedDecisionsStream = &Feature{Name: "chunked_decisions_stream", Descript var PapiClient = &Feature{Name: "papi_client", Description: "Enable Polling API client", State: DeprecatedState} var Re2GrokSupport = &Feature{Name: "re2_grok_support", Description: "Enable RE2 support for GROK patterns"} var Re2RegexpInfileSupport = &Feature{Name: "re2_regexp_in_file_support", Description: "Enable RE2 support for RegexpInFile expr helper"} +var CAPIUsageMetrics = &Feature{Name: "capi_usage_metrics", Description: "Enable usage metrics push to CAPI"} func RegisterAllFeatures() error { err := Crowdsec.RegisterFeature(CscliSetup) @@ -40,5 +41,10 @@ func RegisterAllFeatures() error { return err } + err = Crowdsec.RegisterFeature(CAPIUsageMetrics) + if err != nil { + return err + } + return nil } From 33778ca87f9050f8b4f6a2b7c1ff8d4e62cc902c Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Fri, 22 Mar 2024 14:19:05 +0100 Subject: [PATCH 010/119] wip --- pkg/apiserver/apic_metrics.go | 80 +++++++++++++++++++++++++++++------ 1 file changed, 68 insertions(+), 12 deletions(-) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 75696604dab..03c8b14ce98 100644 --- 
a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -3,18 +3,19 @@ package apiserver import ( "context" "encoding/json" + "fmt" "strings" "time" "slices" - "github.com/davecgh/go-spew/spew" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/go-cs-lib/version" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -25,36 +26,53 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, error) { return nil, err } - spew.Dump(lpsMetrics) + //spew.Dump(lpsMetrics) bouncersMetrics, err := a.dbClient.GetBouncersUsageMetrics() if err != nil { return nil, err } - spew.Dump(bouncersMetrics) + //spew.Dump(bouncersMetrics) allMetrics := &models.AllMetrics{} - allLps := a.dbClient.ListMachines() - allBouncers := a.dbClient.ListBouncers() + /*allLps, err := a.dbClient.ListMachines() + + if err != nil { + return nil, err + } + + allBouncers, err := a.dbClient.ListBouncers() + + if err != nil { + return nil, err + }*/ + + lpsCache := make(map[string]*ent.Machine) + bouncersCache := make(map[string]*ent.Bouncer) for _, lpsMetric := range lpsMetrics { lpName := lpsMetric.GeneratedBy metrics := models.LogProcessorsMetricsItems0{} err := json.Unmarshal([]byte(lpsMetric.Payload), &metrics) - if err != nil { log.Errorf("unable to unmarshal LPs metrics (%s)", err) continue } - lp, err := a.dbClient.QueryMachineByID(lpName) + var lp *ent.Machine - if err != nil { - log.Errorf("unable to get LP information for %s: %s", lpName, err) - continue + if _, ok := lpsCache[lpName]; !ok { + lp, err = a.dbClient.QueryMachineByID(lpName) + + if err != nil { + log.Errorf("unable to get LP information for %s: %s", lpName, err) + continue + } + } else { + lp = lpsCache[lpName] } if lp.Hubstate != nil { @@ -70,6 +88,40 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, error) { metrics.Version = &lp.Version //TODO: meta + 
allMetrics.LogProcessors = append(allMetrics.LogProcessors, models.LogProcessorsMetrics{&metrics}) + } + + for _, bouncersMetric := range bouncersMetrics { + bouncerName := bouncersMetric.GeneratedBy + metrics := models.RemediationComponentsMetricsItems0{} + + err := json.Unmarshal([]byte(bouncersMetric.Payload), &metrics) + if err != nil { + log.Errorf("unable to unmarshal bouncers metrics (%s)", err) + continue + } + + var bouncer *ent.Bouncer + + if _, ok := bouncersCache[bouncerName]; !ok { + bouncer, err = a.dbClient.SelectBouncerByName(bouncerName) + if err != nil { + log.Errorf("unable to get bouncer information for %s: %s", bouncerName, err) + continue + } + } else { + bouncer = bouncersCache[bouncerName] + } + + metrics.Os = &models.OSversion{ + Name: bouncer.Osname, + Version: bouncer.Osversion, + } + metrics.Type = bouncer.Type + metrics.FeatureFlags = strings.Split(bouncer.Featureflags, ",") + //TODO: meta + + allMetrics.RemediationComponents = append(allMetrics.RemediationComponents, models.RemediationComponentsMetrics{&metrics}) } //bouncerInfos := make(map[string]string) @@ -238,11 +290,15 @@ func (a *apic) SendUsageMetrics() { ticker.Stop() return case <-ticker.C: - _, err := a.GetUsageMetrics() + metrics, err := a.GetUsageMetrics() if err != nil { log.Errorf("unable to get usage metrics (%s)", err) } - + jsonStr, err := json.Marshal(metrics) + if err != nil { + log.Errorf("unable to marshal usage metrics (%s)", err) + } + fmt.Printf("Usage metrics: %s\n", string(jsonStr)) } } } From 0be7cc3cfc875684d5df1da471665c661bfdad50 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Fri, 19 Apr 2024 18:02:19 +0200 Subject: [PATCH 011/119] store more payload --- pkg/apiserver/controllers/v1/usagemetrics.go | 48 ++++++++++---------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go index f77239e56c6..d630943fdc3 100644 --- 
a/pkg/apiserver/controllers/v1/usagemetrics.go +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -3,19 +3,18 @@ package v1 import ( "encoding/json" "fmt" - "net/http" + "net/http" "time" - "github.com/gin-gonic/gin" - "github.com/go-openapi/strfmt" - log "github.com/sirupsen/logrus" + "github.com/gin-gonic/gin" + "github.com/go-openapi/strfmt" + log "github.com/sirupsen/logrus" - "github.com/crowdsecurity/crowdsec/pkg/models" - "github.com/crowdsecurity/crowdsec/pkg/database/ent" - "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" + "github.com/crowdsecurity/crowdsec/pkg/models" ) - // updateBaseMetrics updates the base metrics for a machine or bouncer func (c *Controller) updateBaseMetrics(machineID string, bouncer *ent.Bouncer, baseMetrics *models.BaseMetrics, hubItems *models.HubItems) error { switch { @@ -30,24 +29,23 @@ func (c *Controller) updateBaseMetrics(machineID string, bouncer *ent.Bouncer, b return nil } - // UsageMetrics receives metrics from log processors and remediation components func (c *Controller) UsageMetrics(gctx *gin.Context) { - var input models.AllMetrics + var input models.AllMetrics // parse the payload - if err := gctx.ShouldBindJSON(&input); err != nil { - log.Errorf("Failed to bind json: %s", err) - gctx.JSON(http.StatusBadRequest, gin.H{"message": err.Error()}) - return - } + if err := gctx.ShouldBindJSON(&input); err != nil { + log.Errorf("Failed to bind json: %s", err) + gctx.JSON(http.StatusBadRequest, gin.H{"message": err.Error()}) + return + } - if err := input.Validate(strfmt.Default); err != nil { - log.Errorf("Failed to validate usage metrics: %s", err) - c.HandleDBErrors(gctx, err) - return - } + if err := input.Validate(strfmt.Default); err != nil { + log.Errorf("Failed to validate usage metrics: %s", err) + c.HandleDBErrors(gctx, err) + return + } // TODO: validate payload with the right type, 
depending on auth context @@ -74,9 +72,9 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { // TODO: if both or none are set, which error should we return? var ( - payload map[string]any + payload map[string]any baseMetrics models.BaseMetrics - hubItems models.HubItems + hubItems models.HubItems ) switch len(input.LogProcessors) { @@ -89,6 +87,8 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { payload = map[string]any{ "console_options": item0.ConsoleOptions, "datasources": item0.Datasources, + "metrics": item0.Metrics, + "meta": item0.Meta, } baseMetrics = item0.BaseMetrics hubItems = item0.HubItems @@ -105,7 +105,9 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { case 1: item0 := input.RemediationComponents[0][0] payload = map[string]any{ - "type": item0.Type, + "type": item0.Type, + "metrics": item0.Metrics, + "meta": item0.Meta, // TODO: RC stuff like traffic stats } baseMetrics = item0.BaseMetrics From be64f619f25b57059b258fa80013492ad4ee5dc0 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Fri, 19 Apr 2024 18:33:46 +0200 Subject: [PATCH 012/119] set pushed_at after sending metrics --- pkg/apiserver/apic_metrics.go | 37 +++++++++++++++++------------------ pkg/database/metrics.go | 13 ++++++++++++ 2 files changed, 31 insertions(+), 19 deletions(-) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 03c8b14ce98..dc1af60806c 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -19,36 +19,25 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) -func (a *apic) GetUsageMetrics() (*models.AllMetrics, error) { +func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { lpsMetrics, err := a.dbClient.GetLPsUsageMetrics() + metricsIds := make([]int, 0) if err != nil { - return nil, err + return nil, nil, err } //spew.Dump(lpsMetrics) bouncersMetrics, err := a.dbClient.GetBouncersUsageMetrics() if err != nil { - return nil, err + return nil, nil, err } 
//spew.Dump(bouncersMetrics) allMetrics := &models.AllMetrics{} - /*allLps, err := a.dbClient.ListMachines() - - if err != nil { - return nil, err - } - - allBouncers, err := a.dbClient.ListBouncers() - - if err != nil { - return nil, err - }*/ - lpsCache := make(map[string]*ent.Machine) bouncersCache := make(map[string]*ent.Bouncer) @@ -86,9 +75,9 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, error) { metrics.FeatureFlags = strings.Split(lp.Featureflags, ",") metrics.Version = &lp.Version - //TODO: meta allMetrics.LogProcessors = append(allMetrics.LogProcessors, models.LogProcessorsMetrics{&metrics}) + metricsIds = append(metricsIds, lpsMetric.ID) } for _, bouncersMetric := range bouncersMetrics { @@ -119,16 +108,21 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, error) { } metrics.Type = bouncer.Type metrics.FeatureFlags = strings.Split(bouncer.Featureflags, ",") - //TODO: meta + metrics.Version = &bouncer.Version allMetrics.RemediationComponents = append(allMetrics.RemediationComponents, models.RemediationComponentsMetrics{&metrics}) + metricsIds = append(metricsIds, bouncersMetric.ID) } //bouncerInfos := make(map[string]string) //TODO: add LAPI metrics - return allMetrics, nil + return allMetrics, metricsIds, nil +} + +func (a *apic) MarkUsageMetricsAsSent(ids []int) error { + return a.dbClient.MarkUsageMetricsAsSent(ids) } func (a *apic) GetMetrics() (*models.Metrics, error) { @@ -290,7 +284,7 @@ func (a *apic) SendUsageMetrics() { ticker.Stop() return case <-ticker.C: - metrics, err := a.GetUsageMetrics() + metrics, metricsId, err := a.GetUsageMetrics() if err != nil { log.Errorf("unable to get usage metrics (%s)", err) } @@ -299,6 +293,11 @@ func (a *apic) SendUsageMetrics() { log.Errorf("unable to marshal usage metrics (%s)", err) } fmt.Printf("Usage metrics: %s\n", string(jsonStr)) + //TODO: actually send the data + err = a.MarkUsageMetricsAsSent(metricsId) + if err != nil { + log.Errorf("unable to mark usage metrics as sent (%s)", err) 
+ } } } } diff --git a/pkg/database/metrics.go b/pkg/database/metrics.go index 22f699a7866..c7932367b34 100644 --- a/pkg/database/metrics.go +++ b/pkg/database/metrics.go @@ -69,3 +69,16 @@ func (c *Client) GetBouncersUsageMetrics() ([]*ent.Metric, error) { return metrics, nil } + +func (c *Client) MarkUsageMetricsAsSent(ids []int) error { + _, err := c.Ent.Metric.Update(). + Where(metric.IDIn(ids...)). + SetPushedAt(time.Now()). + Save(c.CTX) + if err != nil { + c.Log.Warningf("MarkUsageMetricsAsSent: %s", err) + return fmt.Errorf("marking usage metrics as sent: %w", err) + } + + return nil +} From 2b940a45f8bdd41d257bdd0c11cea8ce6169ab96 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Mon, 22 Apr 2024 09:58:48 +0200 Subject: [PATCH 013/119] up --- pkg/apiserver/apic_metrics.go | 24 ++++++++++++++------ pkg/models/localapi_swagger.yaml | 12 ++++++++++ pkg/models/log_processors_metrics.go | 22 ++++++++++++++++++ pkg/models/remediation_components_metrics.go | 22 ++++++++++++++++++ 4 files changed, 73 insertions(+), 7 deletions(-) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index dc1af60806c..ab13a69fef2 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -3,7 +3,6 @@ package apiserver import ( "context" "encoding/json" - "fmt" "strings" "time" @@ -76,6 +75,9 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { metrics.FeatureFlags = strings.Split(lp.Featureflags, ",") metrics.Version = &lp.Version + metrics.Name = lpName + metrics.LastPush = lp.LastPush.UTC().Unix() + allMetrics.LogProcessors = append(allMetrics.LogProcessors, models.LogProcessorsMetrics{&metrics}) metricsIds = append(metricsIds, lpsMetric.ID) } @@ -109,6 +111,8 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { metrics.Type = bouncer.Type metrics.FeatureFlags = strings.Split(bouncer.Featureflags, ",") metrics.Version = &bouncer.Version + metrics.Name = bouncerName + metrics.LastPull = 
bouncer.LastPull.UTC().Unix() allMetrics.RemediationComponents = append(allMetrics.RemediationComponents, models.RemediationComponentsMetrics{&metrics}) metricsIds = append(metricsIds, bouncersMetric.ID) @@ -288,15 +292,21 @@ func (a *apic) SendUsageMetrics() { if err != nil { log.Errorf("unable to get usage metrics (%s)", err) } - jsonStr, err := json.Marshal(metrics) + /*jsonStr, err := json.Marshal(metrics) if err != nil { log.Errorf("unable to marshal usage metrics (%s)", err) - } - fmt.Printf("Usage metrics: %s\n", string(jsonStr)) - //TODO: actually send the data - err = a.MarkUsageMetricsAsSent(metricsId) + }*/ + //fmt.Printf("Usage metrics: %s\n", string(jsonStr)) + _, _, err = a.apiClient.UsageMetrics.Add(context.Background(), metrics) + if err != nil { - log.Errorf("unable to mark usage metrics as sent (%s)", err) + log.Errorf("unable to send usage metrics (%s)", err) + } else { + + err = a.MarkUsageMetricsAsSent(metricsId) + if err != nil { + log.Errorf("unable to mark usage metrics as sent (%s)", err) + } } } } diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index 1d99389c311..4485490ee9f 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -1036,6 +1036,12 @@ definitions: type: type: string description: type of the remediation component + name: + type: string + description: name of the remediation component + last_pull: + type: integer + description: last pull date LogProcessorsMetrics: title: LogProcessorsMetrics type: array @@ -1054,6 +1060,12 @@ definitions: description: Number of datasources per type additionalProperties: type: integer + name: + type: string + description: name of the log processor + last_push: + type: integer + description: last push date AllMetrics: title: AllMetrics type: object diff --git a/pkg/models/log_processors_metrics.go b/pkg/models/log_processors_metrics.go index 197c215a451..f605e4ea413 100644 --- a/pkg/models/log_processors_metrics.go +++ 
b/pkg/models/log_processors_metrics.go @@ -98,6 +98,12 @@ type LogProcessorsMetricsItems0 struct { // hub items HubItems HubItems `json:"hub_items,omitempty"` + + // last push date + LastPush int64 `json:"last_push,omitempty"` + + // name of the log processor + Name string `json:"name,omitempty"` } // UnmarshalJSON unmarshals this object from a JSON structure @@ -116,6 +122,10 @@ func (m *LogProcessorsMetricsItems0) UnmarshalJSON(raw []byte) error { Datasources map[string]int64 `json:"datasources,omitempty"` HubItems HubItems `json:"hub_items,omitempty"` + + LastPush int64 `json:"last_push,omitempty"` + + Name string `json:"name,omitempty"` } if err := swag.ReadJSON(raw, &dataAO1); err != nil { return err @@ -127,6 +137,10 @@ func (m *LogProcessorsMetricsItems0) UnmarshalJSON(raw []byte) error { m.HubItems = dataAO1.HubItems + m.LastPush = dataAO1.LastPush + + m.Name = dataAO1.Name + return nil } @@ -145,6 +159,10 @@ func (m LogProcessorsMetricsItems0) MarshalJSON() ([]byte, error) { Datasources map[string]int64 `json:"datasources,omitempty"` HubItems HubItems `json:"hub_items,omitempty"` + + LastPush int64 `json:"last_push,omitempty"` + + Name string `json:"name,omitempty"` } dataAO1.ConsoleOptions = m.ConsoleOptions @@ -153,6 +171,10 @@ func (m LogProcessorsMetricsItems0) MarshalJSON() ([]byte, error) { dataAO1.HubItems = m.HubItems + dataAO1.LastPush = m.LastPush + + dataAO1.Name = m.Name + jsonDataAO1, errAO1 := swag.WriteJSON(dataAO1) if errAO1 != nil { return nil, errAO1 diff --git a/pkg/models/remediation_components_metrics.go b/pkg/models/remediation_components_metrics.go index 5506558c13a..f3b5a08398b 100644 --- a/pkg/models/remediation_components_metrics.go +++ b/pkg/models/remediation_components_metrics.go @@ -90,6 +90,12 @@ func (m RemediationComponentsMetrics) ContextValidate(ctx context.Context, forma type RemediationComponentsMetricsItems0 struct { BaseMetrics + // last pull date + LastPull int64 `json:"last_pull,omitempty"` + + // name of the 
remediation component + Name string `json:"name,omitempty"` + // type of the remediation component Type string `json:"type,omitempty"` } @@ -105,12 +111,20 @@ func (m *RemediationComponentsMetricsItems0) UnmarshalJSON(raw []byte) error { // AO1 var dataAO1 struct { + LastPull int64 `json:"last_pull,omitempty"` + + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` } if err := swag.ReadJSON(raw, &dataAO1); err != nil { return err } + m.LastPull = dataAO1.LastPull + + m.Name = dataAO1.Name + m.Type = dataAO1.Type return nil @@ -126,9 +140,17 @@ func (m RemediationComponentsMetricsItems0) MarshalJSON() ([]byte, error) { } _parts = append(_parts, aO0) var dataAO1 struct { + LastPull int64 `json:"last_pull,omitempty"` + + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` } + dataAO1.LastPull = m.LastPull + + dataAO1.Name = m.Name + dataAO1.Type = m.Type jsonDataAO1, errAO1 := swag.WriteJSON(dataAO1) From c6ebd7ae04fffb7c0afcff35a14b097221a12682 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Mon, 22 Apr 2024 11:40:13 +0200 Subject: [PATCH 014/119] handle 422 error from CAPI --- pkg/apiclient/resperr.go | 12 +++++++++--- pkg/apiclient/usagemetrics.go | 3 +-- pkg/apiserver/apic_metrics.go | 6 +++--- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/pkg/apiclient/resperr.go b/pkg/apiclient/resperr.go index ff954a73609..6edd1334ec5 100644 --- a/pkg/apiclient/resperr.go +++ b/pkg/apiclient/resperr.go @@ -34,12 +34,18 @@ func CheckResponse(r *http.Response) error { data, err := io.ReadAll(r.Body) if err != nil || len(data) == 0 { - ret.Message = ptr.Of(fmt.Sprintf("http code %d, no error message", r.StatusCode)) + ret.Message = ptr.Of(fmt.Sprintf("http code %d, no response body", r.StatusCode)) return ret } - if err := json.Unmarshal(data, ret); err != nil { - return fmt.Errorf("http code %d, invalid body: %w", r.StatusCode, err) + switch r.StatusCode { + case 422: + ret.Message = ptr.Of(fmt.Sprintf("http code 
%d, invalid request: %s", r.StatusCode, string(data))) + default: + if err := json.Unmarshal(data, ret); err != nil { + ret.Message = ptr.Of(fmt.Sprintf("http code %d, invalid body: %s", r.StatusCode, string(data))) + return ret + } } return ret diff --git a/pkg/apiclient/usagemetrics.go b/pkg/apiclient/usagemetrics.go index 6649913a459..c418f4843d6 100644 --- a/pkg/apiclient/usagemetrics.go +++ b/pkg/apiclient/usagemetrics.go @@ -11,13 +11,12 @@ import ( type UsageMetricsService service func (s *UsageMetricsService) Add(ctx context.Context, metrics *models.AllMetrics) (interface{}, *Response, error) { - u := fmt.Sprintf("%s/usage-metrics/", s.client.URLPrefix) + u := fmt.Sprintf("%s/usage-metrics", s.client.URLPrefix) req, err := s.client.NewRequest(http.MethodPost, u, &metrics) if err != nil { return nil, nil, err } - var response interface{} resp, err := s.client.Do(ctx, req, &response) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index ab13a69fef2..6b32f74c391 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -290,7 +290,7 @@ func (a *apic) SendUsageMetrics() { case <-ticker.C: metrics, metricsId, err := a.GetUsageMetrics() if err != nil { - log.Errorf("unable to get usage metrics (%s)", err) + log.Errorf("unable to get usage metrics: %s", err) } /*jsonStr, err := json.Marshal(metrics) if err != nil { @@ -300,12 +300,12 @@ func (a *apic) SendUsageMetrics() { _, _, err = a.apiClient.UsageMetrics.Add(context.Background(), metrics) if err != nil { - log.Errorf("unable to send usage metrics (%s)", err) + log.Errorf("unable to send usage metrics: %s", err) } else { err = a.MarkUsageMetricsAsSent(metricsId) if err != nil { - log.Errorf("unable to mark usage metrics as sent (%s)", err) + log.Errorf("unable to mark usage metrics as sent: %s", err) } } } From f0853188ce3f3f246c55ee3358a275ed9ea5358d Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Mon, 22 Apr 2024 14:25:23 +0200 Subject: [PATCH 015/119] 
update swagger and models --- cmd/crowdsec/lpmetrics.go | 6 +- pkg/apiserver/apic_metrics.go | 8 +- pkg/apiserver/controllers/v1/usagemetrics.go | 4 +- pkg/models/all_metrics.go | 76 ++++++++++----- pkg/models/localapi_swagger.yaml | 73 +++++++-------- pkg/models/log_processors_metrics.go | 99 +++----------------- pkg/models/remediation_components_metrics.go | 91 ++---------------- 7 files changed, 119 insertions(+), 238 deletions(-) diff --git a/cmd/crowdsec/lpmetrics.go b/cmd/crowdsec/lpmetrics.go index 19e0ac79882..cef905c9125 100644 --- a/cmd/crowdsec/lpmetrics.go +++ b/cmd/crowdsec/lpmetrics.go @@ -125,7 +125,7 @@ func (m *MetricsProvider) metricsPayload() *models.AllMetrics { FeatureFlags: m.static.featureFlags, } - item0 := &models.LogProcessorsMetricsItems0{ + met := &models.LogProcessorsMetrics{ BaseMetrics: base, ConsoleOptions: m.static.consoleOptions, Datasources: m.static.datasourceMap, @@ -135,7 +135,7 @@ func (m *MetricsProvider) metricsPayload() *models.AllMetrics { // TODO: more metric details... ? 
return &models.AllMetrics{ - LogProcessors: []models.LogProcessorsMetrics{{item0}}, + LogProcessors: []*models.LogProcessorsMetrics{met}, } } @@ -153,7 +153,7 @@ func (m *MetricsProvider) Run(ctx context.Context, myTomb *tomb.Tomb) error { for { select { case <-ticker.C: - met.LogProcessors[0][0].Meta.UtcNowTimestamp = time.Now().Unix() + met.LogProcessors[0].Meta.UtcNowTimestamp = time.Now().Unix() ctxTime, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 6b32f74c391..9db8c80e787 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -42,7 +42,7 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { for _, lpsMetric := range lpsMetrics { lpName := lpsMetric.GeneratedBy - metrics := models.LogProcessorsMetricsItems0{} + metrics := models.LogProcessorsMetrics{} err := json.Unmarshal([]byte(lpsMetric.Payload), &metrics) if err != nil { @@ -78,13 +78,13 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { metrics.Name = lpName metrics.LastPush = lp.LastPush.UTC().Unix() - allMetrics.LogProcessors = append(allMetrics.LogProcessors, models.LogProcessorsMetrics{&metrics}) + allMetrics.LogProcessors = append(allMetrics.LogProcessors, &metrics) metricsIds = append(metricsIds, lpsMetric.ID) } for _, bouncersMetric := range bouncersMetrics { bouncerName := bouncersMetric.GeneratedBy - metrics := models.RemediationComponentsMetricsItems0{} + metrics := models.RemediationComponentsMetrics{} err := json.Unmarshal([]byte(bouncersMetric.Payload), &metrics) if err != nil { @@ -114,7 +114,7 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { metrics.Name = bouncerName metrics.LastPull = bouncer.LastPull.UTC().Unix() - allMetrics.RemediationComponents = append(allMetrics.RemediationComponents, models.RemediationComponentsMetrics{&metrics}) + allMetrics.RemediationComponents = 
append(allMetrics.RemediationComponents, &metrics) metricsIds = append(metricsIds, bouncersMetric.ID) } diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go index d630943fdc3..1016ce3df2b 100644 --- a/pkg/apiserver/controllers/v1/usagemetrics.go +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -83,7 +83,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { case 1: // the final slice can't have more than one item, // guaranteed by the swagger schema - item0 := input.LogProcessors[0][0] + item0 := input.LogProcessors[0] payload = map[string]any{ "console_options": item0.ConsoleOptions, "datasources": item0.Datasources, @@ -103,7 +103,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { case 0: break case 1: - item0 := input.RemediationComponents[0][0] + item0 := input.RemediationComponents[0] payload = map[string]any{ "type": item0.Type, "metrics": item0.Metrics, diff --git a/pkg/models/all_metrics.go b/pkg/models/all_metrics.go index 27f4ead8cd4..4f0b958bd3d 100644 --- a/pkg/models/all_metrics.go +++ b/pkg/models/all_metrics.go @@ -20,10 +20,10 @@ import ( type AllMetrics struct { // log processors metrics - LogProcessors []LogProcessorsMetrics `json:"log_processors"` + LogProcessors []*LogProcessorsMetrics `json:"log_processors"` // remediation components metrics - RemediationComponents []RemediationComponentsMetrics `json:"remediation_components"` + RemediationComponents []*RemediationComponentsMetrics `json:"remediation_components"` } // Validate validates this all metrics @@ -50,14 +50,19 @@ func (m *AllMetrics) validateLogProcessors(formats strfmt.Registry) error { } for i := 0; i < len(m.LogProcessors); i++ { + if swag.IsZero(m.LogProcessors[i]) { // not required + continue + } - if err := m.LogProcessors[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("log_processors" + "." 
+ strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("log_processors" + "." + strconv.Itoa(i)) + if m.LogProcessors[i] != nil { + if err := m.LogProcessors[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("log_processors" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("log_processors" + "." + strconv.Itoa(i)) + } + return err } - return err } } @@ -71,14 +76,19 @@ func (m *AllMetrics) validateRemediationComponents(formats strfmt.Registry) erro } for i := 0; i < len(m.RemediationComponents); i++ { + if swag.IsZero(m.RemediationComponents[i]) { // not required + continue + } - if err := m.RemediationComponents[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("remediation_components" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("remediation_components" + "." + strconv.Itoa(i)) + if m.RemediationComponents[i] != nil { + if err := m.RemediationComponents[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("remediation_components" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("remediation_components" + "." + strconv.Itoa(i)) + } + return err } - return err } } @@ -108,13 +118,20 @@ func (m *AllMetrics) contextValidateLogProcessors(ctx context.Context, formats s for i := 0; i < len(m.LogProcessors); i++ { - if err := m.LogProcessors[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("log_processors" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("log_processors" + "." 
+ strconv.Itoa(i)) + if m.LogProcessors[i] != nil { + + if swag.IsZero(m.LogProcessors[i]) { // not required + return nil + } + + if err := m.LogProcessors[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("log_processors" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("log_processors" + "." + strconv.Itoa(i)) + } + return err } - return err } } @@ -126,13 +143,20 @@ func (m *AllMetrics) contextValidateRemediationComponents(ctx context.Context, f for i := 0; i < len(m.RemediationComponents); i++ { - if err := m.RemediationComponents[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("remediation_components" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("remediation_components" + "." + strconv.Itoa(i)) + if m.RemediationComponents[i] != nil { + + if swag.IsZero(m.RemediationComponents[i]) { // not required + return nil + } + + if err := m.RemediationComponents[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("remediation_components" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("remediation_components" + "." 
+ strconv.Itoa(i)) + } + return err } - return err } } diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index 4485490ee9f..55ef9e29614 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -1026,46 +1026,45 @@ definitions: type: string RemediationComponentsMetrics: title: RemediationComponentsMetrics - type: array - maxItems: 1 - items: - allOf: - - $ref: '#/definitions/BaseMetrics' - - type: object - properties: - type: - type: string - description: type of the remediation component - name: - type: string - description: name of the remediation component - last_pull: - type: integer - description: last pull date + type: object + allOf: + - $ref: '#/definitions/BaseMetrics' + - properties: + type: + type: string + description: type of the remediation component + name: + type: string + description: name of the remediation component + last_pull: + type: integer + description: last pull date LogProcessorsMetrics: title: LogProcessorsMetrics - type: array - maxItems: 1 - items: - allOf: - - $ref: '#/definitions/BaseMetrics' - - type: object - properties: - console_options: - $ref: '#/definitions/ConsoleOptions' - hub_items: - $ref: '#/definitions/HubItems' - datasources: - type: object - description: Number of datasources per type - additionalProperties: - type: integer - name: - type: string - description: name of the log processor - last_push: + type: object + allOf: + - $ref: '#/definitions/BaseMetrics' + - properties: + console_options: + $ref: '#/definitions/ConsoleOptions' + hub_items: + $ref: '#/definitions/HubItems' + datasources: + type: object + description: Number of datasources per type + additionalProperties: type: integer - description: last push date + name: + type: string + description: name of the log processor + last_push: + type: integer + description: last push date + #items: + # allOf: + # - $ref: '#/definitions/BaseMetrics' + # - type: object + AllMetrics: title: AllMetrics type: 
object diff --git a/pkg/models/log_processors_metrics.go b/pkg/models/log_processors_metrics.go index f605e4ea413..c8c305d765a 100644 --- a/pkg/models/log_processors_metrics.go +++ b/pkg/models/log_processors_metrics.go @@ -7,87 +7,16 @@ package models import ( "context" - "strconv" "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" - "github.com/go-openapi/validate" ) // LogProcessorsMetrics LogProcessorsMetrics // // swagger:model LogProcessorsMetrics -type LogProcessorsMetrics []*LogProcessorsMetricsItems0 - -// Validate validates this log processors metrics -func (m LogProcessorsMetrics) Validate(formats strfmt.Registry) error { - var res []error - - iLogProcessorsMetricsSize := int64(len(m)) - - if err := validate.MaxItems("", "body", iLogProcessorsMetricsSize, 1); err != nil { - return err - } - - for i := 0; i < len(m); i++ { - if swag.IsZero(m[i]) { // not required - continue - } - - if m[i] != nil { - if err := m[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName(strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName(strconv.Itoa(i)) - } - return err - } - } - - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// ContextValidate validate this log processors metrics based on the context it is used -func (m LogProcessorsMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - for i := 0; i < len(m); i++ { - - if m[i] != nil { - - if swag.IsZero(m[i]) { // not required - return nil - } - - if err := m[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName(strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName(strconv.Itoa(i)) - } - return err - } - } - - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -// LogProcessorsMetricsItems0 log processors metrics items0 -// -// swagger:model LogProcessorsMetricsItems0 -type LogProcessorsMetricsItems0 struct { +type LogProcessorsMetrics struct { BaseMetrics // console options @@ -107,7 +36,7 @@ type LogProcessorsMetricsItems0 struct { } // UnmarshalJSON unmarshals this object from a JSON structure -func (m *LogProcessorsMetricsItems0) UnmarshalJSON(raw []byte) error { +func (m *LogProcessorsMetrics) UnmarshalJSON(raw []byte) error { // AO0 var aO0 BaseMetrics if err := swag.ReadJSON(raw, &aO0); err != nil { @@ -145,7 +74,7 @@ func (m *LogProcessorsMetricsItems0) UnmarshalJSON(raw []byte) error { } // MarshalJSON marshals this object to a JSON structure -func (m LogProcessorsMetricsItems0) MarshalJSON() ([]byte, error) { +func (m LogProcessorsMetrics) MarshalJSON() ([]byte, error) { _parts := make([][]byte, 0, 2) aO0, err := swag.WriteJSON(m.BaseMetrics) @@ -183,8 +112,8 @@ func (m LogProcessorsMetricsItems0) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(_parts...), nil } -// Validate validates this log processors metrics items0 -func (m *LogProcessorsMetricsItems0) Validate(formats strfmt.Registry) error { +// Validate validates this log processors metrics +func (m *LogProcessorsMetrics) Validate(formats strfmt.Registry) error { var res []error // validation for a type composition with BaseMetrics @@ -206,7 +135,7 @@ func (m *LogProcessorsMetricsItems0) Validate(formats strfmt.Registry) error { return nil } -func (m *LogProcessorsMetricsItems0) validateConsoleOptions(formats strfmt.Registry) error { +func (m *LogProcessorsMetrics) validateConsoleOptions(formats strfmt.Registry) error { if swag.IsZero(m.ConsoleOptions) { // not required return nil @@ -224,7 +153,7 @@ func (m *LogProcessorsMetricsItems0) validateConsoleOptions(formats strfmt.Regis return nil } -func (m *LogProcessorsMetricsItems0) validateHubItems(formats strfmt.Registry) error { +func (m *LogProcessorsMetrics) 
validateHubItems(formats strfmt.Registry) error { if swag.IsZero(m.HubItems) { // not required return nil @@ -244,8 +173,8 @@ func (m *LogProcessorsMetricsItems0) validateHubItems(formats strfmt.Registry) e return nil } -// ContextValidate validate this log processors metrics items0 based on the context it is used -func (m *LogProcessorsMetricsItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { +// ContextValidate validate this log processors metrics based on the context it is used +func (m *LogProcessorsMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { var res []error // validation for a type composition with BaseMetrics @@ -267,7 +196,7 @@ func (m *LogProcessorsMetricsItems0) ContextValidate(ctx context.Context, format return nil } -func (m *LogProcessorsMetricsItems0) contextValidateConsoleOptions(ctx context.Context, formats strfmt.Registry) error { +func (m *LogProcessorsMetrics) contextValidateConsoleOptions(ctx context.Context, formats strfmt.Registry) error { if err := m.ConsoleOptions.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { @@ -281,7 +210,7 @@ func (m *LogProcessorsMetricsItems0) contextValidateConsoleOptions(ctx context.C return nil } -func (m *LogProcessorsMetricsItems0) contextValidateHubItems(ctx context.Context, formats strfmt.Registry) error { +func (m *LogProcessorsMetrics) contextValidateHubItems(ctx context.Context, formats strfmt.Registry) error { if swag.IsZero(m.HubItems) { // not required return nil @@ -300,7 +229,7 @@ func (m *LogProcessorsMetricsItems0) contextValidateHubItems(ctx context.Context } // MarshalBinary interface implementation -func (m *LogProcessorsMetricsItems0) MarshalBinary() ([]byte, error) { +func (m *LogProcessorsMetrics) MarshalBinary() ([]byte, error) { if m == nil { return nil, nil } @@ -308,8 +237,8 @@ func (m *LogProcessorsMetricsItems0) MarshalBinary() ([]byte, error) { } // UnmarshalBinary interface implementation 
-func (m *LogProcessorsMetricsItems0) UnmarshalBinary(b []byte) error { - var res LogProcessorsMetricsItems0 +func (m *LogProcessorsMetrics) UnmarshalBinary(b []byte) error { + var res LogProcessorsMetrics if err := swag.ReadJSON(b, &res); err != nil { return err } diff --git a/pkg/models/remediation_components_metrics.go b/pkg/models/remediation_components_metrics.go index f3b5a08398b..ba3845d872a 100644 --- a/pkg/models/remediation_components_metrics.go +++ b/pkg/models/remediation_components_metrics.go @@ -7,87 +7,16 @@ package models import ( "context" - "strconv" "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" - "github.com/go-openapi/validate" ) // RemediationComponentsMetrics RemediationComponentsMetrics // // swagger:model RemediationComponentsMetrics -type RemediationComponentsMetrics []*RemediationComponentsMetricsItems0 - -// Validate validates this remediation components metrics -func (m RemediationComponentsMetrics) Validate(formats strfmt.Registry) error { - var res []error - - iRemediationComponentsMetricsSize := int64(len(m)) - - if err := validate.MaxItems("", "body", iRemediationComponentsMetricsSize, 1); err != nil { - return err - } - - for i := 0; i < len(m); i++ { - if swag.IsZero(m[i]) { // not required - continue - } - - if m[i] != nil { - if err := m[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName(strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName(strconv.Itoa(i)) - } - return err - } - } - - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -// ContextValidate validate this remediation components metrics based on the context it is used -func (m RemediationComponentsMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - for i := 0; i < len(m); i++ { - - if m[i] != nil { - - if swag.IsZero(m[i]) { // not required - return nil - } - - if err := m[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName(strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName(strconv.Itoa(i)) - } - return err - } - } - - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// RemediationComponentsMetricsItems0 remediation components metrics items0 -// -// swagger:model RemediationComponentsMetricsItems0 -type RemediationComponentsMetricsItems0 struct { +type RemediationComponentsMetrics struct { BaseMetrics // last pull date @@ -101,7 +30,7 @@ type RemediationComponentsMetricsItems0 struct { } // UnmarshalJSON unmarshals this object from a JSON structure -func (m *RemediationComponentsMetricsItems0) UnmarshalJSON(raw []byte) error { +func (m *RemediationComponentsMetrics) UnmarshalJSON(raw []byte) error { // AO0 var aO0 BaseMetrics if err := swag.ReadJSON(raw, &aO0); err != nil { @@ -131,7 +60,7 @@ func (m *RemediationComponentsMetricsItems0) UnmarshalJSON(raw []byte) error { } // MarshalJSON marshals this object to a JSON structure -func (m RemediationComponentsMetricsItems0) MarshalJSON() ([]byte, error) { +func (m RemediationComponentsMetrics) MarshalJSON() ([]byte, error) { _parts := make([][]byte, 0, 2) aO0, err := swag.WriteJSON(m.BaseMetrics) @@ -161,8 +90,8 @@ func (m RemediationComponentsMetricsItems0) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(_parts...), nil } -// Validate validates this remediation components metrics items0 -func (m *RemediationComponentsMetricsItems0) Validate(formats 
strfmt.Registry) error { +// Validate validates this remediation components metrics +func (m *RemediationComponentsMetrics) Validate(formats strfmt.Registry) error { var res []error // validation for a type composition with BaseMetrics @@ -176,8 +105,8 @@ func (m *RemediationComponentsMetricsItems0) Validate(formats strfmt.Registry) e return nil } -// ContextValidate validate this remediation components metrics items0 based on the context it is used -func (m *RemediationComponentsMetricsItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { +// ContextValidate validate this remediation components metrics based on the context it is used +func (m *RemediationComponentsMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { var res []error // validation for a type composition with BaseMetrics @@ -192,7 +121,7 @@ func (m *RemediationComponentsMetricsItems0) ContextValidate(ctx context.Context } // MarshalBinary interface implementation -func (m *RemediationComponentsMetricsItems0) MarshalBinary() ([]byte, error) { +func (m *RemediationComponentsMetrics) MarshalBinary() ([]byte, error) { if m == nil { return nil, nil } @@ -200,8 +129,8 @@ func (m *RemediationComponentsMetricsItems0) MarshalBinary() ([]byte, error) { } // UnmarshalBinary interface implementation -func (m *RemediationComponentsMetricsItems0) UnmarshalBinary(b []byte) error { - var res RemediationComponentsMetricsItems0 +func (m *RemediationComponentsMetrics) UnmarshalBinary(b []byte) error { + var res RemediationComponentsMetrics if err := swag.ReadJSON(b, &res); err != nil { return err } From 7b093b925e3be2d7a37de74079a880255ad84a41 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Mon, 22 Apr 2024 16:08:26 +0200 Subject: [PATCH 016/119] add lapi metrics in swagger and models --- pkg/models/all_metrics.go | 51 ++++++++++++++++++++++++++++++++ pkg/models/localapi_swagger.yaml | 15 ++++++---- 2 files changed, 61 insertions(+), 5 deletions(-) diff --git 
a/pkg/models/all_metrics.go b/pkg/models/all_metrics.go index 4f0b958bd3d..5865070e8ef 100644 --- a/pkg/models/all_metrics.go +++ b/pkg/models/all_metrics.go @@ -19,6 +19,9 @@ import ( // swagger:model AllMetrics type AllMetrics struct { + // lapi + Lapi *LapiMetrics `json:"lapi,omitempty"` + // log processors metrics LogProcessors []*LogProcessorsMetrics `json:"log_processors"` @@ -30,6 +33,10 @@ type AllMetrics struct { func (m *AllMetrics) Validate(formats strfmt.Registry) error { var res []error + if err := m.validateLapi(formats); err != nil { + res = append(res, err) + } + if err := m.validateLogProcessors(formats); err != nil { res = append(res, err) } @@ -44,6 +51,25 @@ func (m *AllMetrics) Validate(formats strfmt.Registry) error { return nil } +func (m *AllMetrics) validateLapi(formats strfmt.Registry) error { + if swag.IsZero(m.Lapi) { // not required + return nil + } + + if m.Lapi != nil { + if err := m.Lapi.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("lapi") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("lapi") + } + return err + } + } + + return nil +} + func (m *AllMetrics) validateLogProcessors(formats strfmt.Registry) error { if swag.IsZero(m.LogProcessors) { // not required return nil @@ -100,6 +126,10 @@ func (m *AllMetrics) validateRemediationComponents(formats strfmt.Registry) erro func (m *AllMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { var res []error + if err := m.contextValidateLapi(ctx, formats); err != nil { + res = append(res, err) + } + if err := m.contextValidateLogProcessors(ctx, formats); err != nil { res = append(res, err) } @@ -114,6 +144,27 @@ func (m *AllMetrics) ContextValidate(ctx context.Context, formats strfmt.Registr return nil } +func (m *AllMetrics) contextValidateLapi(ctx context.Context, formats strfmt.Registry) error { + + if m.Lapi != nil { + + if swag.IsZero(m.Lapi) { // not required + return 
nil + } + + if err := m.Lapi.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("lapi") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("lapi") + } + return err + } + } + + return nil +} + func (m *AllMetrics) contextValidateLogProcessors(ctx context.Context, formats strfmt.Registry) error { for i := 0; i < len(m.LogProcessors); i++ { diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index 55ef9e29614..5a1bee3cd21 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -1060,11 +1060,14 @@ definitions: last_push: type: integer description: last push date - #items: - # allOf: - # - $ref: '#/definitions/BaseMetrics' - # - type: object - + LapiMetrics: + title: LapiMetrics + type: object + allOf: + - $ref: '#/definitions/BaseMetrics' + - properties: + console_options: + $ref: '#/definitions/ConsoleOptions' AllMetrics: title: AllMetrics type: object @@ -1079,6 +1082,8 @@ definitions: items: $ref: '#/definitions/LogProcessorsMetrics' description: log processors metrics + lapi: + $ref: '#/definitions/LapiMetrics' BaseMetrics: title: BaseMetrics type: object From 18b8ddb49ab0a84c12fc6bfbd4e0b63f7b5acdb3 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Mon, 22 Apr 2024 17:02:07 +0200 Subject: [PATCH 017/119] add missing file --- pkg/models/lapi_metrics.go | 157 +++++++++++++++++++++++++++++++++++++ 1 file changed, 157 insertions(+) create mode 100644 pkg/models/lapi_metrics.go diff --git a/pkg/models/lapi_metrics.go b/pkg/models/lapi_metrics.go new file mode 100644 index 00000000000..b56d92ef1f8 --- /dev/null +++ b/pkg/models/lapi_metrics.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// LapiMetrics LapiMetrics +// +// swagger:model LapiMetrics +type LapiMetrics struct { + BaseMetrics + + // console options + ConsoleOptions ConsoleOptions `json:"console_options,omitempty"` +} + +// UnmarshalJSON unmarshals this object from a JSON structure +func (m *LapiMetrics) UnmarshalJSON(raw []byte) error { + // AO0 + var aO0 BaseMetrics + if err := swag.ReadJSON(raw, &aO0); err != nil { + return err + } + m.BaseMetrics = aO0 + + // AO1 + var dataAO1 struct { + ConsoleOptions ConsoleOptions `json:"console_options,omitempty"` + } + if err := swag.ReadJSON(raw, &dataAO1); err != nil { + return err + } + + m.ConsoleOptions = dataAO1.ConsoleOptions + + return nil +} + +// MarshalJSON marshals this object to a JSON structure +func (m LapiMetrics) MarshalJSON() ([]byte, error) { + _parts := make([][]byte, 0, 2) + + aO0, err := swag.WriteJSON(m.BaseMetrics) + if err != nil { + return nil, err + } + _parts = append(_parts, aO0) + var dataAO1 struct { + ConsoleOptions ConsoleOptions `json:"console_options,omitempty"` + } + + dataAO1.ConsoleOptions = m.ConsoleOptions + + jsonDataAO1, errAO1 := swag.WriteJSON(dataAO1) + if errAO1 != nil { + return nil, errAO1 + } + _parts = append(_parts, jsonDataAO1) + return swag.ConcatJSON(_parts...), nil +} + +// Validate validates this lapi metrics +func (m *LapiMetrics) Validate(formats strfmt.Registry) error { + var res []error + + // validation for a type composition with BaseMetrics + if err := m.BaseMetrics.Validate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConsoleOptions(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *LapiMetrics) validateConsoleOptions(formats strfmt.Registry) error { + + if swag.IsZero(m.ConsoleOptions) { // not required + return nil + } + + if err := m.ConsoleOptions.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("console_options") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("console_options") + } + return err + } + + return nil +} + +// ContextValidate validate this lapi metrics based on the context it is used +func (m *LapiMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + // validation for a type composition with BaseMetrics + if err := m.BaseMetrics.ContextValidate(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConsoleOptions(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *LapiMetrics) contextValidateConsoleOptions(ctx context.Context, formats strfmt.Registry) error { + + if err := m.ConsoleOptions.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("console_options") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("console_options") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *LapiMetrics) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *LapiMetrics) UnmarshalBinary(b []byte) error { + var res LapiMetrics + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} From 538ab0bc62f47d6511de1b938ffc9d862eaa6d96 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Mon, 22 Apr 2024 17:44:42 +0200 Subject: [PATCH 018/119] up --- 
pkg/apiserver/apic_metrics.go | 43 +++++++++++++++++++- pkg/apiserver/controllers/v1/usagemetrics.go | 1 - pkg/models/localapi_swagger.yaml | 3 ++ pkg/models/log_processors_metrics.go | 11 +++++ 4 files changed, 55 insertions(+), 3 deletions(-) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 9db8c80e787..05ba5fe3fe4 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -14,7 +14,9 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/go-cs-lib/version" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/fflag" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -77,6 +79,12 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { metrics.Name = lpName metrics.LastPush = lp.LastPush.UTC().Unix() + metrics.LastUpdate = lp.UpdatedAt.UTC().Unix() + + //To prevent marshalling a nil slice to null, which gets rejected by the API + if metrics.Metrics == nil { + metrics.Metrics = make([]*models.MetricsDetailItem, 0) + } allMetrics.LogProcessors = append(allMetrics.LogProcessors, &metrics) metricsIds = append(metricsIds, lpsMetric.ID) @@ -114,13 +122,42 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { metrics.Name = bouncerName metrics.LastPull = bouncer.LastPull.UTC().Unix() + //To prevent marshalling a nil slice to null, which gets rejected by the API + if metrics.Metrics == nil { + metrics.Metrics = make([]*models.MetricsDetailItem, 0) + } + allMetrics.RemediationComponents = append(allMetrics.RemediationComponents, &metrics) metricsIds = append(metricsIds, bouncersMetric.ID) } - //bouncerInfos := make(map[string]string) + //FIXME: all of this should only be done once on startup/reload + allMetrics.Lapi = &models.LapiMetrics{ + ConsoleOptions: models.ConsoleOptions{ + "FIXME", + }, + } + allMetrics.Lapi.Os = &models.OSversion{ + Name: "FIXME", + Version: 
"FIXME", + } + allMetrics.Lapi.Version = ptr.Of(cwversion.VersionStr()) + allMetrics.Lapi.FeatureFlags = fflag.Crowdsec.GetEnabledFeatures() - //TODO: add LAPI metrics + allMetrics.Lapi.Meta = &models.MetricsMeta{ + UtcStartupTimestamp: time.Now().UTC().Unix(), + UtcNowTimestamp: time.Now().UTC().Unix(), + WindowSizeSeconds: int64(a.metricsInterval.Seconds()), + } + allMetrics.Lapi.Metrics = make([]*models.MetricsDetailItem, 0) + + if allMetrics.RemediationComponents == nil { + allMetrics.RemediationComponents = make([]*models.RemediationComponentsMetrics, 0) + } + + if allMetrics.LogProcessors == nil { + allMetrics.LogProcessors = make([]*models.LogProcessorsMetrics, 0) + } return allMetrics, metricsIds, nil } @@ -306,6 +343,8 @@ func (a *apic) SendUsageMetrics() { err = a.MarkUsageMetricsAsSent(metricsId) if err != nil { log.Errorf("unable to mark usage metrics as sent: %s", err) + } else { + log.Infof("Usage metrics sent") } } } diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go index 1016ce3df2b..37aa0335079 100644 --- a/pkg/apiserver/controllers/v1/usagemetrics.go +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -108,7 +108,6 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { "type": item0.Type, "metrics": item0.Metrics, "meta": item0.Meta, - // TODO: RC stuff like traffic stats } baseMetrics = item0.BaseMetrics default: diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index 5a1bee3cd21..0ce082e50ed 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -1060,6 +1060,9 @@ definitions: last_push: type: integer description: last push date + last_update: + type: integer + description: last update date LapiMetrics: title: LapiMetrics type: object diff --git a/pkg/models/log_processors_metrics.go b/pkg/models/log_processors_metrics.go index c8c305d765a..ab033b020fc 100644 --- a/pkg/models/log_processors_metrics.go +++ 
b/pkg/models/log_processors_metrics.go @@ -31,6 +31,9 @@ type LogProcessorsMetrics struct { // last push date LastPush int64 `json:"last_push,omitempty"` + // last update date + LastUpdate int64 `json:"last_update,omitempty"` + // name of the log processor Name string `json:"name,omitempty"` } @@ -54,6 +57,8 @@ func (m *LogProcessorsMetrics) UnmarshalJSON(raw []byte) error { LastPush int64 `json:"last_push,omitempty"` + LastUpdate int64 `json:"last_update,omitempty"` + Name string `json:"name,omitempty"` } if err := swag.ReadJSON(raw, &dataAO1); err != nil { @@ -68,6 +73,8 @@ func (m *LogProcessorsMetrics) UnmarshalJSON(raw []byte) error { m.LastPush = dataAO1.LastPush + m.LastUpdate = dataAO1.LastUpdate + m.Name = dataAO1.Name return nil @@ -91,6 +98,8 @@ func (m LogProcessorsMetrics) MarshalJSON() ([]byte, error) { LastPush int64 `json:"last_push,omitempty"` + LastUpdate int64 `json:"last_update,omitempty"` + Name string `json:"name,omitempty"` } @@ -102,6 +111,8 @@ func (m LogProcessorsMetrics) MarshalJSON() ([]byte, error) { dataAO1.LastPush = m.LastPush + dataAO1.LastUpdate = m.LastUpdate + dataAO1.Name = m.Name jsonDataAO1, errAO1 := swag.WriteJSON(dataAO1) From e36d2cb6b88c5aa603cb2abb1d7cbff45e0383e0 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Mon, 22 Apr 2024 17:58:09 +0200 Subject: [PATCH 019/119] up --- pkg/apiserver/apic_metrics.go | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 05ba5fe3fe4..260f59d3470 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -328,25 +328,21 @@ func (a *apic) SendUsageMetrics() { metrics, metricsId, err := a.GetUsageMetrics() if err != nil { log.Errorf("unable to get usage metrics: %s", err) + continue } - /*jsonStr, err := json.Marshal(metrics) - if err != nil { - log.Errorf("unable to marshal usage metrics (%s)", err) - }*/ - //fmt.Printf("Usage metrics: %s\n", string(jsonStr)) 
_, _, err = a.apiClient.UsageMetrics.Add(context.Background(), metrics) if err != nil { log.Errorf("unable to send usage metrics: %s", err) - } else { - - err = a.MarkUsageMetricsAsSent(metricsId) - if err != nil { - log.Errorf("unable to mark usage metrics as sent: %s", err) - } else { - log.Infof("Usage metrics sent") - } + continue } + err = a.MarkUsageMetricsAsSent(metricsId) + if err != nil { + log.Errorf("unable to mark usage metrics as sent: %s", err) + continue + } + log.Infof("Usage metrics sent") + } } } From 12ce5e3fc14fdefbefd57002c518431055f6d8bd Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Tue, 23 Apr 2024 13:22:54 +0200 Subject: [PATCH 020/119] add more required fields in models --- pkg/models/localapi_swagger.yaml | 4 +++ pkg/models/metrics_detail_item.go | 49 +++++++++++++++++++++++++++++-- 2 files changed, 50 insertions(+), 3 deletions(-) diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index 6e40a47ef79..42437be7023 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -1142,6 +1142,10 @@ definitions: labels: $ref: '#/definitions/MetricsLabels' description: labels of the metric + required: + - name + - value + - unit MetricsMeta: title: MetricsMeta type: object diff --git a/pkg/models/metrics_detail_item.go b/pkg/models/metrics_detail_item.go index ed13470eece..889f7e263d2 100644 --- a/pkg/models/metrics_detail_item.go +++ b/pkg/models/metrics_detail_item.go @@ -11,6 +11,7 @@ import ( "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" + "github.com/go-openapi/validate" ) // MetricsDetailItem MetricsDetailItem @@ -22,13 +23,16 @@ type MetricsDetailItem struct { Labels MetricsLabels `json:"labels,omitempty"` // name of the metric - Name string `json:"name,omitempty"` + // Required: true + Name *string `json:"name"` // unit of the metric - Unit string `json:"unit,omitempty"` + // Required: true + Unit *string `json:"unit"` // value of the 
metric - Value float64 `json:"value,omitempty"` + // Required: true + Value *float64 `json:"value"` } // Validate validates this metrics detail item @@ -39,6 +43,18 @@ func (m *MetricsDetailItem) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUnit(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { return errors.CompositeValidationError(res...) } @@ -64,6 +80,33 @@ func (m *MetricsDetailItem) validateLabels(formats strfmt.Registry) error { return nil } +func (m *MetricsDetailItem) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *MetricsDetailItem) validateUnit(formats strfmt.Registry) error { + + if err := validate.Required("unit", "body", m.Unit); err != nil { + return err + } + + return nil +} + +func (m *MetricsDetailItem) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("value", "body", m.Value); err != nil { + return err + } + + return nil +} + // ContextValidate validate this metrics detail item based on the context it is used func (m *MetricsDetailItem) ContextValidate(ctx context.Context, formats strfmt.Registry) error { var res []error From 8d62aef86c5a42915515e3cb25da8c89d778071c Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Thu, 2 May 2024 23:33:40 +0200 Subject: [PATCH 021/119] fix test --- pkg/apiclient/client_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/apiclient/client_test.go b/pkg/apiclient/client_test.go index d3296c4b67f..e5d5df1b0da 100644 --- a/pkg/apiclient/client_test.go +++ b/pkg/apiclient/client_test.go @@ -347,5 +347,5 @@ func TestNewClientBadAnswer(t *testing.T) { URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) - 
cstest.RequireErrorContains(t, err, "invalid body: invalid character 'b' looking for beginning of value") + cstest.RequireErrorContains(t, err, "invalid body: bad") } From de26714e101917a969dc7fd081079ea4b87b5c3b Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Fri, 3 May 2024 00:38:03 +0200 Subject: [PATCH 022/119] add cscli machines inspect --- cmd/crowdsec-cli/machines.go | 111 +++++++++++++++++++++++++++++++---- 1 file changed, 101 insertions(+), 10 deletions(-) diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 1457fb5a0cc..ae24c03352c 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -6,6 +6,7 @@ import ( "encoding/json" "errors" "fmt" + "io" "math/big" "os" "slices" @@ -13,6 +14,7 @@ import ( "time" "github.com/AlecAivazis/survey/v2" + "github.com/aquasecurity/table" "github.com/fatih/color" "github.com/go-openapi/strfmt" "github.com/google/uuid" @@ -24,8 +26,10 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -147,21 +151,69 @@ Note: This command requires database direct access, so is intended to be run on cmd.AddCommand(cli.newDeleteCmd()) cmd.AddCommand(cli.newValidateCmd()) cmd.AddCommand(cli.newPruneCmd()) + cmd.AddCommand(cli.newInspectCmd()) return cmd } -func (cli *cliMachines) list() error { - out := color.Output +func showItems(out io.Writer, itemType string, items map[string]models.HubItem) { + t := newLightTable(out) + t.SetHeaders("Name", "Status", "Version") + t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) - machines, err := cli.db.ListMachines() - if err != nil { - return 
fmt.Errorf("unable to list machines: %w", err) + for name, item := range items { + t.AddRow(name, item.Status, item.Version) + } + + renderTableTitle(out, strings.ToUpper(itemType)) + t.Render() +} + +func showHubState(out io.Writer, machines []*ent.Machine) { + + //FIXME: ugly + items := make(map[string]map[string]models.HubItem) + + for _, itemType := range cwhub.ItemTypes { + items[itemType] = map[string]models.HubItem{} } + for _, machine := range machines { + state := machine.Hubstate + + if state == nil { + continue + } + + for name, item := range *state { + //here, name is type:actual_name + //we want to split it to get the type + split := strings.Split(name, ":") + if len(split) != 2 { + log.Warningf("invalid hub item name '%s'", name) + continue + } + items[split[0]][split[1]] = item + //items[split[0]] = append(items[split[0]], item) + } + } + + for _, t := range cwhub.ItemTypes { + showItems(out, t, items[t]) + } +} + +func (cli *cliMachines) machinesShow(machines []*ent.Machine, showHub bool) error { + //FIXME: should showHub be used for json/raw output ? 
+ out := color.Output + switch cli.cfg().Cscli.Output { case "human": getAgentsTable(out, machines) + if showHub { + showHubState(out, machines) + } case "json": enc := json.NewEncoder(out) enc.SetIndent("", " ") @@ -174,7 +226,7 @@ func (cli *cliMachines) list() error { case "raw": csvwriter := csv.NewWriter(out) - err := csvwriter.Write([]string{"machine_id", "ip_address", "updated_at", "validated", "version", "auth_type", "last_heartbeat"}) + err := csvwriter.Write([]string{"machine_id", "ip_address", "updated_at", "validated", "version", "auth_type", "last_heartbeat", "os", "feature_flags"}) if err != nil { return fmt.Errorf("failed to write header: %w", err) } @@ -187,17 +239,26 @@ func (cli *cliMachines) list() error { hb, _ := getLastHeartbeat(m) - if err := csvwriter.Write([]string{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb}); err != nil { + if err := csvwriter.Write([]string{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb, fmt.Sprintf("%s/%s", m.Osname, m.Osversion), m.Featureflags}); err != nil { return fmt.Errorf("failed to write raw output: %w", err) } } csvwriter.Flush() } - return nil } +func (cli *cliMachines) list() error { + + machines, err := cli.db.ListMachines() + if err != nil { + return fmt.Errorf("unable to list machines: %w", err) + } + + return cli.machinesShow(machines, false) +} + func (cli *cliMachines) newListCmd() *cobra.Command { cmd := &cobra.Command{ Use: "list", @@ -399,7 +460,7 @@ func (cli *cliMachines) newDeleteCmd() *cobra.Command { func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force bool) error { if duration < 2*time.Minute && !notValidOnly { if yes, err := askYesNo( - "The duration you provided is less than 2 minutes. " + + "The duration you provided is less than 2 minutes. "+ "This can break installations if the machines are only temporarily disconnected. 
Continue?", false); err != nil { return err } else if !yes { @@ -428,7 +489,7 @@ func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force b if !force { if yes, err := askYesNo( - "You are about to PERMANENTLY remove the above machines from the database. " + + "You are about to PERMANENTLY remove the above machines from the database. "+ "These will NOT be recoverable. Continue?", false); err != nil { return err } else if !yes { @@ -503,3 +564,33 @@ func (cli *cliMachines) newValidateCmd() *cobra.Command { return cmd } + +func (cli *cliMachines) inspect(machineID string, showHub bool) error { + machine, err := cli.db.QueryMachineByID(machineID) + if err != nil { + return fmt.Errorf("unable to get machine '%s': %w", machineID, err) + } + + return cli.machinesShow([]*ent.Machine{machine}, showHub) +} + +func (cli *cliMachines) newInspectCmd() *cobra.Command { + var showHub bool + + cmd := &cobra.Command{ + Use: "inspect [machine_name]", + Short: "inspect a machine by name", + Example: `cscli machines inspect "machine1"`, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + return cli.inspect(args[0], showHub) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&showHub, "hub", "H", false, "show hub state") + + return cmd +} From f6b8b01871fdc3084ecebc68dee7ff8b32cfe766 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Fri, 3 May 2024 10:02:09 +0200 Subject: [PATCH 023/119] lint --- pkg/apiclient/resperr.go | 2 +- pkg/apiserver/apic_metrics.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/apiclient/resperr.go b/pkg/apiclient/resperr.go index 6edd1334ec5..e8f12ee9f4e 100644 --- a/pkg/apiclient/resperr.go +++ b/pkg/apiclient/resperr.go @@ -39,7 +39,7 @@ func CheckResponse(r *http.Response) error { } switch r.StatusCode { - case 422: + case http.StatusUnprocessableEntity: ret.Message = ptr.Of(fmt.Sprintf("http code %d, invalid request: %s", r.StatusCode, 
string(data))) default: if err := json.Unmarshal(data, ret); err != nil { diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 260f59d3470..1643bb5441c 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -81,7 +81,7 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { metrics.LastPush = lp.LastPush.UTC().Unix() metrics.LastUpdate = lp.UpdatedAt.UTC().Unix() - //To prevent marshalling a nil slice to null, which gets rejected by the API + //To prevent marshaling a nil slice to null, which gets rejected by the API if metrics.Metrics == nil { metrics.Metrics = make([]*models.MetricsDetailItem, 0) } From 9d6b6cd9174d0827e89bddc17d7d66afc6b19144 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Fri, 3 May 2024 10:18:58 +0200 Subject: [PATCH 024/119] fix bats test for usage-metrics --- test/bats/30_machines.bats | 1 - 1 file changed, 1 deletion(-) diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index f9f87a56de9..284f763f132 100644 --- a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -113,7 +113,6 @@ teardown() { payload=$(cat <<-EOT remediation_components: [] log_processors: - - - version: "v1.0" feature_flags: - marshmallows From 088dd1ae133a039275837e43ac6376fc6d11d035 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Fri, 3 May 2024 10:34:01 +0200 Subject: [PATCH 025/119] lint --- pkg/apiserver/apic_metrics.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 1643bb5441c..3872e8c656e 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -122,7 +122,7 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { metrics.Name = bouncerName metrics.LastPull = bouncer.LastPull.UTC().Unix() - //To prevent marshalling a nil slice to null, which gets rejected by the API + //To prevent marshaling a nil slice to null, which gets rejected 
by the API if metrics.Metrics == nil { metrics.Metrics = make([]*models.MetricsDetailItem, 0) } From f08a6bcac60fcaf2a82ed1339224a26d6d625afd Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Thu, 6 Jun 2024 21:12:11 +0200 Subject: [PATCH 026/119] remove console_options for LPs --- cmd/crowdsec/lpmetrics.go | 7 ++- pkg/apiserver/controllers/v1/usagemetrics.go | 7 ++- pkg/models/localapi_swagger.yaml | 2 - pkg/models/log_processors_metrics.go | 51 -------------------- 4 files changed, 6 insertions(+), 61 deletions(-) diff --git a/cmd/crowdsec/lpmetrics.go b/cmd/crowdsec/lpmetrics.go index 82d4619d113..f32156e31b1 100644 --- a/cmd/crowdsec/lpmetrics.go +++ b/cmd/crowdsec/lpmetrics.go @@ -126,10 +126,9 @@ func (m *MetricsProvider) metricsPayload() *models.AllMetrics { } met := &models.LogProcessorsMetrics{ - BaseMetrics: base, - ConsoleOptions: m.static.consoleOptions, - Datasources: m.static.datasourceMap, - HubItems: m.static.hubState, + BaseMetrics: base, + Datasources: m.static.datasourceMap, + HubItems: m.static.hubState, } // TODO: more metric details... ? 
diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go index 37aa0335079..120e6662fc5 100644 --- a/pkg/apiserver/controllers/v1/usagemetrics.go +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -85,10 +85,9 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { // guaranteed by the swagger schema item0 := input.LogProcessors[0] payload = map[string]any{ - "console_options": item0.ConsoleOptions, - "datasources": item0.Datasources, - "metrics": item0.Metrics, - "meta": item0.Meta, + "datasources": item0.Datasources, + "metrics": item0.Metrics, + "meta": item0.Meta, } baseMetrics = item0.BaseMetrics hubItems = item0.HubItems diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index 42437be7023..05b7af4d7ee 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -1045,8 +1045,6 @@ definitions: allOf: - $ref: '#/definitions/BaseMetrics' - properties: - console_options: - $ref: '#/definitions/ConsoleOptions' hub_items: $ref: '#/definitions/HubItems' datasources: diff --git a/pkg/models/log_processors_metrics.go b/pkg/models/log_processors_metrics.go index ab033b020fc..83bc1f4a118 100644 --- a/pkg/models/log_processors_metrics.go +++ b/pkg/models/log_processors_metrics.go @@ -19,9 +19,6 @@ import ( type LogProcessorsMetrics struct { BaseMetrics - // console options - ConsoleOptions ConsoleOptions `json:"console_options,omitempty"` - // Number of datasources per type Datasources map[string]int64 `json:"datasources,omitempty"` @@ -49,8 +46,6 @@ func (m *LogProcessorsMetrics) UnmarshalJSON(raw []byte) error { // AO1 var dataAO1 struct { - ConsoleOptions ConsoleOptions `json:"console_options,omitempty"` - Datasources map[string]int64 `json:"datasources,omitempty"` HubItems HubItems `json:"hub_items,omitempty"` @@ -65,8 +60,6 @@ func (m *LogProcessorsMetrics) UnmarshalJSON(raw []byte) error { return err } - m.ConsoleOptions = dataAO1.ConsoleOptions - 
m.Datasources = dataAO1.Datasources m.HubItems = dataAO1.HubItems @@ -90,8 +83,6 @@ func (m LogProcessorsMetrics) MarshalJSON() ([]byte, error) { } _parts = append(_parts, aO0) var dataAO1 struct { - ConsoleOptions ConsoleOptions `json:"console_options,omitempty"` - Datasources map[string]int64 `json:"datasources,omitempty"` HubItems HubItems `json:"hub_items,omitempty"` @@ -103,8 +94,6 @@ func (m LogProcessorsMetrics) MarshalJSON() ([]byte, error) { Name string `json:"name,omitempty"` } - dataAO1.ConsoleOptions = m.ConsoleOptions - dataAO1.Datasources = m.Datasources dataAO1.HubItems = m.HubItems @@ -132,10 +121,6 @@ func (m *LogProcessorsMetrics) Validate(formats strfmt.Registry) error { res = append(res, err) } - if err := m.validateConsoleOptions(formats); err != nil { - res = append(res, err) - } - if err := m.validateHubItems(formats); err != nil { res = append(res, err) } @@ -146,24 +131,6 @@ func (m *LogProcessorsMetrics) Validate(formats strfmt.Registry) error { return nil } -func (m *LogProcessorsMetrics) validateConsoleOptions(formats strfmt.Registry) error { - - if swag.IsZero(m.ConsoleOptions) { // not required - return nil - } - - if err := m.ConsoleOptions.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("console_options") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("console_options") - } - return err - } - - return nil -} - func (m *LogProcessorsMetrics) validateHubItems(formats strfmt.Registry) error { if swag.IsZero(m.HubItems) { // not required @@ -193,10 +160,6 @@ func (m *LogProcessorsMetrics) ContextValidate(ctx context.Context, formats strf res = append(res, err) } - if err := m.contextValidateConsoleOptions(ctx, formats); err != nil { - res = append(res, err) - } - if err := m.contextValidateHubItems(ctx, formats); err != nil { res = append(res, err) } @@ -207,20 +170,6 @@ func (m *LogProcessorsMetrics) ContextValidate(ctx context.Context, formats 
strf return nil } -func (m *LogProcessorsMetrics) contextValidateConsoleOptions(ctx context.Context, formats strfmt.Registry) error { - - if err := m.ConsoleOptions.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("console_options") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("console_options") - } - return err - } - - return nil -} - func (m *LogProcessorsMetrics) contextValidateHubItems(ctx context.Context, formats strfmt.Registry) error { if swag.IsZero(m.HubItems) { // not required From 0cd822a0fcf7a0710af7bf1e55c7116d4e77dd82 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Tue, 11 Jun 2024 17:59:08 +0200 Subject: [PATCH 027/119] wip --- pkg/apiserver/apic_metrics.go | 121 +++++++++++++++------------------- pkg/apiserver/apiserver.go | 4 +- pkg/database/metrics.go | 34 ++++++++++ 3 files changed, 89 insertions(+), 70 deletions(-) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 8f690dea7bd..bed40ec1f10 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -8,126 +8,106 @@ import ( "slices" + "github.com/davecgh/go-spew/spew" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/go-cs-lib/version" - "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/fflag" "github.com/crowdsecurity/crowdsec/pkg/models" ) func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { - lpsMetrics, err := a.dbClient.GetLPsUsageMetrics() + allMetrics := &models.AllMetrics{} metricsIds := make([]int, 0) + lps, err := a.dbClient.ListMachines() if err != nil { return nil, nil, err } - //spew.Dump(lpsMetrics) - - bouncersMetrics, err := a.dbClient.GetBouncersUsageMetrics() + bouncers, err := a.dbClient.ListBouncers() if err != nil { return nil, nil, err } - //spew.Dump(bouncersMetrics) 
- - allMetrics := &models.AllMetrics{} - - lpsCache := make(map[string]*ent.Machine) - bouncersCache := make(map[string]*ent.Bouncer) - - for _, lpsMetric := range lpsMetrics { - lpName := lpsMetric.GeneratedBy - metrics := models.LogProcessorsMetrics{} + for _, bouncer := range bouncers { + metrics := models.RemediationComponentsMetrics{} - err := json.Unmarshal([]byte(lpsMetric.Payload), &metrics) + dbMetrics, err := a.dbClient.GetBouncerUsageMetricsByName(bouncer.Name) if err != nil { - log.Errorf("unable to unmarshal LPs metrics (%s)", err) + log.Errorf("unable to get bouncer usage metrics: %s", err) continue } - var lp *ent.Machine - - if _, ok := lpsCache[lpName]; !ok { - lp, err = a.dbClient.QueryMachineByID(lpName) + metrics.Metrics = make([]*models.MetricsDetailItem, 0) + for _, dbMetric := range dbMetrics { + metric := &models.MetricsDetailItem{} + //Append no matter what, if we cannot unmarshal, there's no way we'll be able to fix it automatically + metricsIds = append(metricsIds, dbMetric.ID) + err := json.Unmarshal([]byte(dbMetric.Payload), metric) if err != nil { - log.Errorf("unable to get LP information for %s: %s", lpName, err) + log.Errorf("unable to unmarshal bouncer metric (%s)", err) continue } - } else { - lp = lpsCache[lpName] - } - if lp.Hubstate != nil { - metrics.HubItems = *lp.Hubstate + metrics.Metrics = append(metrics.Metrics, metric) } metrics.Os = &models.OSversion{ - Name: lp.Osname, - Version: lp.Osversion, - } - - metrics.FeatureFlags = strings.Split(lp.Featureflags, ",") - metrics.Version = &lp.Version - - metrics.Name = lpName - metrics.LastPush = lp.LastPush.UTC().Unix() - metrics.LastUpdate = lp.UpdatedAt.UTC().Unix() - - //To prevent marshaling a nil slice to null, which gets rejected by the API - if metrics.Metrics == nil { - metrics.Metrics = make([]*models.MetricsDetailItem, 0) + Name: bouncer.Osname, + Version: bouncer.Osversion, } + metrics.Type = bouncer.Type + metrics.FeatureFlags = strings.Split(bouncer.Featureflags, 
",") + metrics.Version = &bouncer.Version + metrics.Name = bouncer.Name + metrics.LastPull = bouncer.LastPull.UTC().Unix() - allMetrics.LogProcessors = append(allMetrics.LogProcessors, &metrics) - metricsIds = append(metricsIds, lpsMetric.ID) + allMetrics.RemediationComponents = append(allMetrics.RemediationComponents, &metrics) } - for _, bouncersMetric := range bouncersMetrics { - bouncerName := bouncersMetric.GeneratedBy - metrics := models.RemediationComponentsMetrics{} + for _, lp := range lps { + metrics := models.LogProcessorsMetrics{} - err := json.Unmarshal([]byte(bouncersMetric.Payload), &metrics) + dbMetrics, err := a.dbClient.GetLPUsageMetricsByMachineID(lp.MachineId) if err != nil { - log.Errorf("unable to unmarshal bouncers metrics (%s)", err) + log.Errorf("unable to get LP usage metrics: %s", err) continue } - var bouncer *ent.Bouncer + metrics.Metrics = make([]*models.MetricsDetailItem, 0) + for _, dbMetric := range dbMetrics { + metric := &models.MetricsDetailItem{} + //Append no matter what, if we cannot unmarshal, there's no way we'll be able to fix it automatically + metricsIds = append(metricsIds, dbMetric.ID) - if _, ok := bouncersCache[bouncerName]; !ok { - bouncer, err = a.dbClient.SelectBouncerByName(bouncerName) + err := json.Unmarshal([]byte(dbMetric.Payload), metric) if err != nil { - log.Errorf("unable to get bouncer information for %s: %s", bouncerName, err) + log.Errorf("unable to unmarshal LP metric (%s)", err) continue } - } else { - bouncer = bouncersCache[bouncerName] - } - metrics.Os = &models.OSversion{ - Name: bouncer.Osname, - Version: bouncer.Osversion, + metrics.Metrics = append(metrics.Metrics, metric) } - metrics.Type = bouncer.Type - metrics.FeatureFlags = strings.Split(bouncer.Featureflags, ",") - metrics.Version = &bouncer.Version - metrics.Name = bouncerName - metrics.LastPull = bouncer.LastPull.UTC().Unix() - //To prevent marshaling a nil slice to null, which gets rejected by the API - if metrics.Metrics == nil { - 
metrics.Metrics = make([]*models.MetricsDetailItem, 0) + if lp.Hubstate != nil { + metrics.HubItems = *lp.Hubstate } + metrics.Os = &models.OSversion{ + Name: lp.Osname, + Version: lp.Osversion, + } + metrics.FeatureFlags = strings.Split(lp.Featureflags, ",") + metrics.Version = &lp.Version + metrics.Name = lp.MachineId + metrics.LastPush = lp.LastPush.UTC().Unix() + metrics.LastUpdate = lp.UpdatedAt.UTC().Unix() - allMetrics.RemediationComponents = append(allMetrics.RemediationComponents, &metrics) - metricsIds = append(metricsIds, bouncersMetric.ID) + allMetrics.LogProcessors = append(allMetrics.LogProcessors, &metrics) } //FIXME: all of this should only be done once on startup/reload @@ -144,12 +124,13 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { allMetrics.Lapi.FeatureFlags = fflag.Crowdsec.GetEnabledFeatures() allMetrics.Lapi.Meta = &models.MetricsMeta{ - UtcStartupTimestamp: time.Now().UTC().Unix(), + UtcStartupTimestamp: time.Now().UTC().Unix(), //FIXME: should be the actual startup time UtcNowTimestamp: time.Now().UTC().Unix(), WindowSizeSeconds: int64(a.metricsInterval.Seconds()), } allMetrics.Lapi.Metrics = make([]*models.MetricsDetailItem, 0) + //Force an actual slice to avoid non existing fields in the json if allMetrics.RemediationComponents == nil { allMetrics.RemediationComponents = make([]*models.RemediationComponentsMetrics, 0) } @@ -329,6 +310,8 @@ func (a *apic) SendUsageMetrics() { log.Errorf("unable to get usage metrics: %s", err) continue } + + spew.Dump(metrics) _, _, err = a.apiClient.UsageMetrics.Add(context.Background(), metrics) if err != nil { diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 59b0cd3551d..b51a13d0e9e 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -21,7 +21,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers" - "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" + 
v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/database" @@ -370,6 +370,8 @@ func (s *APIServer) Run(apiReady chan bool) error { if fflag.CAPIUsageMetrics.IsEnabled() { log.Infof("CAPI_USAGE_METRICS flag is enabled, starting usage metrics routine") s.apic.metricsTomb.Go(func() error { + //staticMetrics := NewStaticMetrics(consoleOptions, datasources, hub) + s.apic.SendUsageMetrics() return nil }) diff --git a/pkg/database/metrics.go b/pkg/database/metrics.go index c7932367b34..ae3138891f0 100644 --- a/pkg/database/metrics.go +++ b/pkg/database/metrics.go @@ -54,6 +54,23 @@ func (c *Client) GetLPsUsageMetrics() ([]*ent.Metric, error) { return metrics, nil } +func (c *Client) GetLPUsageMetricsByMachineID(machineId string) ([]*ent.Metric, error) { + metrics, err := c.Ent.Metric.Query(). + Where( + metric.GeneratedTypeEQ(metric.GeneratedTypeLP), + metric.GeneratedByEQ(machineId), + metric.PushedAtIsNil(), + ). + Order(ent.Desc(metric.FieldCollectedAt)). + All(c.CTX) + if err != nil { + c.Log.Warningf("GetLPUsageMetricsByOrigin: %s", err) + return nil, fmt.Errorf("getting LP usage metrics by origin %s: %w", machineId, err) + } + + return metrics, nil +} + func (c *Client) GetBouncersUsageMetrics() ([]*ent.Metric, error) { metrics, err := c.Ent.Metric.Query(). Where( @@ -70,6 +87,23 @@ func (c *Client) GetBouncersUsageMetrics() ([]*ent.Metric, error) { return metrics, nil } +func (c *Client) GetBouncerUsageMetricsByName(bouncerName string) ([]*ent.Metric, error) { + metrics, err := c.Ent.Metric.Query(). + Where( + metric.GeneratedTypeEQ(metric.GeneratedTypeRC), + metric.GeneratedByEQ(bouncerName), + metric.PushedAtIsNil(), + ). + Order(ent.Desc(metric.FieldCollectedAt)). 
+ All(c.CTX) + if err != nil { + c.Log.Warningf("GetBouncerUsageMetricsByOrigin: %s", err) + return nil, fmt.Errorf("getting bouncer usage metrics by origin %s: %w", bouncerName, err) + } + + return metrics, nil +} + func (c *Client) MarkUsageMetricsAsSent(ids []int) error { _, err := c.Ent.Metric.Update(). Where(metric.IDIn(ids...)). From 5c8cfadc88a995538ccfec418df311a8c2db1b00 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Tue, 18 Jun 2024 13:55:51 +0200 Subject: [PATCH 028/119] wip --- pkg/apiserver/apic_metrics.go | 99 ++++++++++++++++++++++------------- 1 file changed, 63 insertions(+), 36 deletions(-) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index bed40ec1f10..fa1f14195e8 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -19,6 +19,12 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) +type dbPayload struct { + Metrics *models.MetricsDetailItem `json:"metrics"` + Meta *models.MetricsMeta `json:"meta"` + Datasources map[string]int64 `json:"datasources"` +} + func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { allMetrics := &models.AllMetrics{} metricsIds := make([]int, 0) @@ -34,7 +40,6 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { } for _, bouncer := range bouncers { - metrics := models.RemediationComponentsMetrics{} dbMetrics, err := a.dbClient.GetBouncerUsageMetricsByName(bouncer.Name) if err != nil { @@ -42,36 +47,44 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { continue } - metrics.Metrics = make([]*models.MetricsDetailItem, 0) + //Might seem weird, but we duplicate the bouncers if we have multiple unsent metrics for _, dbMetric := range dbMetrics { - metric := &models.MetricsDetailItem{} + rcMetrics := models.RemediationComponentsMetrics{} + rcMetrics.Metrics = make([]*models.MetricsDetailItem, 0) + + dbPayload := &dbPayload{} //Append no matter what, if we cannot unmarshal, there's no way we'll be able 
to fix it automatically metricsIds = append(metricsIds, dbMetric.ID) - err := json.Unmarshal([]byte(dbMetric.Payload), metric) + err := json.Unmarshal([]byte(dbMetric.Payload), dbPayload) if err != nil { log.Errorf("unable to unmarshal bouncer metric (%s)", err) continue } - metrics.Metrics = append(metrics.Metrics, metric) - } + rcMetrics.Metrics = append(rcMetrics.Metrics, dbPayload.Metrics) - metrics.Os = &models.OSversion{ - Name: bouncer.Osname, - Version: bouncer.Osversion, - } - metrics.Type = bouncer.Type - metrics.FeatureFlags = strings.Split(bouncer.Featureflags, ",") - metrics.Version = &bouncer.Version - metrics.Name = bouncer.Name - metrics.LastPull = bouncer.LastPull.UTC().Unix() + rcMetrics.Os = &models.OSversion{ + Name: bouncer.Osname, + Version: bouncer.Osversion, + } + rcMetrics.Type = bouncer.Type + rcMetrics.FeatureFlags = strings.Split(bouncer.Featureflags, ",") + rcMetrics.Version = &bouncer.Version + rcMetrics.Name = bouncer.Name + rcMetrics.LastPull = bouncer.LastPull.UTC().Unix() + + rcMetrics.Meta = &models.MetricsMeta{ + UtcStartupTimestamp: dbPayload.Meta.UtcStartupTimestamp, + UtcNowTimestamp: dbPayload.Meta.UtcNowTimestamp, + WindowSizeSeconds: dbPayload.Meta.WindowSizeSeconds, + } - allMetrics.RemediationComponents = append(allMetrics.RemediationComponents, &metrics) + allMetrics.RemediationComponents = append(allMetrics.RemediationComponents, &rcMetrics) + } } for _, lp := range lps { - metrics := models.LogProcessorsMetrics{} dbMetrics, err := a.dbClient.GetLPUsageMetricsByMachineID(lp.MachineId) if err != nil { @@ -79,35 +92,49 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { continue } - metrics.Metrics = make([]*models.MetricsDetailItem, 0) for _, dbMetric := range dbMetrics { - metric := &models.MetricsDetailItem{} + lpMetrics := models.LogProcessorsMetrics{} + lpMetrics.Metrics = make([]*models.MetricsDetailItem, 0) + + dbPayload := &dbPayload{} //Append no matter what, if we cannot unmarshal, there's 
no way we'll be able to fix it automatically metricsIds = append(metricsIds, dbMetric.ID) - err := json.Unmarshal([]byte(dbMetric.Payload), metric) + err := json.Unmarshal([]byte(dbMetric.Payload), dbPayload) if err != nil { - log.Errorf("unable to unmarshal LP metric (%s)", err) + log.Errorf("unable to unmarshal log processor metric (%s)", err) continue } - metrics.Metrics = append(metrics.Metrics, metric) - } + lpMetrics.Metrics = append(lpMetrics.Metrics, dbPayload.Metrics) - if lp.Hubstate != nil { - metrics.HubItems = *lp.Hubstate - } - metrics.Os = &models.OSversion{ - Name: lp.Osname, - Version: lp.Osversion, - } - metrics.FeatureFlags = strings.Split(lp.Featureflags, ",") - metrics.Version = &lp.Version - metrics.Name = lp.MachineId - metrics.LastPush = lp.LastPush.UTC().Unix() - metrics.LastUpdate = lp.UpdatedAt.UTC().Unix() + if lp.Hubstate != nil { + lpMetrics.HubItems = *lp.Hubstate + } + lpMetrics.Os = &models.OSversion{ + Name: lp.Osname, + Version: lp.Osversion, + } + lpMetrics.FeatureFlags = strings.Split(lp.Featureflags, ",") + lpMetrics.Version = &lp.Version + lpMetrics.Name = lp.MachineId + lpMetrics.LastPush = lp.LastPush.UTC().Unix() + lpMetrics.LastUpdate = lp.UpdatedAt.UTC().Unix() + + lpMetrics.Meta = &models.MetricsMeta{ + UtcStartupTimestamp: dbPayload.Meta.UtcStartupTimestamp, + UtcNowTimestamp: dbPayload.Meta.UtcNowTimestamp, + WindowSizeSeconds: dbPayload.Meta.WindowSizeSeconds, + } - allMetrics.LogProcessors = append(allMetrics.LogProcessors, &metrics) + lpMetrics.Datasources = make(map[string]int64) + + for k, v := range dbPayload.Datasources { + lpMetrics.Datasources[k] = v + } + + allMetrics.LogProcessors = append(allMetrics.LogProcessors, &lpMetrics) + } } //FIXME: all of this should only be done once on startup/reload From 61d2dba0c6ed56c21fa2a818200f82cd889ace2f Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Tue, 18 Jun 2024 13:56:40 +0200 Subject: [PATCH 029/119] wip --- pkg/apiserver/apic_metrics.go | 1 - 1 file changed, 1 
deletion(-) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index fa1f14195e8..a70a1921a39 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -351,7 +351,6 @@ func (a *apic) SendUsageMetrics() { continue } log.Infof("Usage metrics sent") - } } } From 2fd72e7a6842130f64cc4edd6f39f4f5c2f26cf7 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Wed, 19 Jun 2024 17:54:45 +0200 Subject: [PATCH 030/119] up --- pkg/apiserver/apic_metrics.go | 122 +++++++++++++++++++++------------- 1 file changed, 75 insertions(+), 47 deletions(-) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index a70a1921a39..1a855bd2348 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -8,13 +8,14 @@ import ( "slices" - "github.com/davecgh/go-spew/spew" + "github.com/blackfireio/osinfo" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/go-cs-lib/version" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/fflag" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -25,6 +26,19 @@ type dbPayload struct { Datasources map[string]int64 `json:"datasources"` } +func detectOS() (string, string) { + if version.System == "docker" { + return "docker", "" + } + + osInfo, err := osinfo.GetOSInfo() + if err != nil { + return version.System, "???" 
+ } + + return osInfo.Name, osInfo.Version +} + func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { allMetrics := &models.AllMetrics{} metricsIds := make([]int, 0) @@ -47,10 +61,23 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { continue } + rcMetrics := models.RemediationComponentsMetrics{} + + rcMetrics.Os = &models.OSversion{ + Name: bouncer.Osname, + Version: bouncer.Osversion, + } + rcMetrics.Type = bouncer.Type + rcMetrics.FeatureFlags = strings.Split(bouncer.Featureflags, ",") + rcMetrics.Version = &bouncer.Version + rcMetrics.Name = bouncer.Name + rcMetrics.LastPull = bouncer.LastPull.UTC().Unix() + + rcMetrics.Metrics = make([]*models.MetricsDetailItem, 0) + rcMetrics.Meta = &models.MetricsMeta{} + //Might seem weird, but we duplicate the bouncers if we have multiple unsent metrics for _, dbMetric := range dbMetrics { - rcMetrics := models.RemediationComponentsMetrics{} - rcMetrics.Metrics = make([]*models.MetricsDetailItem, 0) dbPayload := &dbPayload{} //Append no matter what, if we cannot unmarshal, there's no way we'll be able to fix it automatically @@ -62,24 +89,16 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { continue } - rcMetrics.Metrics = append(rcMetrics.Metrics, dbPayload.Metrics) + rcMetrics.Meta.UtcStartupTimestamp = dbPayload.Meta.UtcStartupTimestamp + rcMetrics.Meta.UtcNowTimestamp = dbPayload.Meta.UtcNowTimestamp + rcMetrics.Meta.WindowSizeSeconds = dbPayload.Meta.WindowSizeSeconds - rcMetrics.Os = &models.OSversion{ - Name: bouncer.Osname, - Version: bouncer.Osversion, - } - rcMetrics.Type = bouncer.Type - rcMetrics.FeatureFlags = strings.Split(bouncer.Featureflags, ",") - rcMetrics.Version = &bouncer.Version - rcMetrics.Name = bouncer.Name - rcMetrics.LastPull = bouncer.LastPull.UTC().Unix() - - rcMetrics.Meta = &models.MetricsMeta{ - UtcStartupTimestamp: dbPayload.Meta.UtcStartupTimestamp, - UtcNowTimestamp: dbPayload.Meta.UtcNowTimestamp, - WindowSizeSeconds: 
dbPayload.Meta.WindowSizeSeconds, - } + rcMetrics.Metrics = append(rcMetrics.Metrics, dbPayload.Metrics) + allMetrics.RemediationComponents = append(allMetrics.RemediationComponents, &rcMetrics) + } + //If we have no metrics, we still want to send the bouncer + if len(rcMetrics.Metrics) == 0 { allMetrics.RemediationComponents = append(allMetrics.RemediationComponents, &rcMetrics) } } @@ -92,9 +111,28 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { continue } + lpMetrics := models.LogProcessorsMetrics{} + + lpMetrics.Os = &models.OSversion{ + Name: lp.Osname, + Version: lp.Osversion, + } + lpMetrics.FeatureFlags = strings.Split(lp.Featureflags, ",") + lpMetrics.Version = &lp.Version + lpMetrics.Name = lp.MachineId + lpMetrics.LastPush = lp.LastPush.UTC().Unix() + lpMetrics.LastUpdate = lp.UpdatedAt.UTC().Unix() + + lpMetrics.Datasources = make(map[string]int64) + + if lp.Hubstate != nil { + lpMetrics.HubItems = *lp.Hubstate + } + + lpMetrics.Metrics = make([]*models.MetricsDetailItem, 0) + lpMetrics.Meta = &models.MetricsMeta{} + for _, dbMetric := range dbMetrics { - lpMetrics := models.LogProcessorsMetrics{} - lpMetrics.Metrics = make([]*models.MetricsDetailItem, 0) dbPayload := &dbPayload{} //Append no matter what, if we cannot unmarshal, there's no way we'll be able to fix it automatically @@ -106,46 +144,37 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { continue } - lpMetrics.Metrics = append(lpMetrics.Metrics, dbPayload.Metrics) - - if lp.Hubstate != nil { - lpMetrics.HubItems = *lp.Hubstate - } - lpMetrics.Os = &models.OSversion{ - Name: lp.Osname, - Version: lp.Osversion, - } - lpMetrics.FeatureFlags = strings.Split(lp.Featureflags, ",") - lpMetrics.Version = &lp.Version - lpMetrics.Name = lp.MachineId - lpMetrics.LastPush = lp.LastPush.UTC().Unix() - lpMetrics.LastUpdate = lp.UpdatedAt.UTC().Unix() - - lpMetrics.Meta = &models.MetricsMeta{ - UtcStartupTimestamp: dbPayload.Meta.UtcStartupTimestamp, - 
UtcNowTimestamp: dbPayload.Meta.UtcNowTimestamp, - WindowSizeSeconds: dbPayload.Meta.WindowSizeSeconds, - } - - lpMetrics.Datasources = make(map[string]int64) + lpMetrics.Meta.UtcStartupTimestamp = dbPayload.Meta.UtcStartupTimestamp + lpMetrics.Meta.UtcNowTimestamp = dbPayload.Meta.UtcNowTimestamp + lpMetrics.Meta.WindowSizeSeconds = dbPayload.Meta.WindowSizeSeconds for k, v := range dbPayload.Datasources { lpMetrics.Datasources[k] = v } + lpMetrics.Metrics = append(lpMetrics.Metrics, dbPayload.Metrics) + allMetrics.LogProcessors = append(allMetrics.LogProcessors, &lpMetrics) + } + + //If we have no metrics, we still want to send the LP + if len(lpMetrics.Metrics) == 0 { allMetrics.LogProcessors = append(allMetrics.LogProcessors, &lpMetrics) } } //FIXME: all of this should only be done once on startup/reload + consoleOptions := strings.Join(csconfig.GetConfig().API.Server.ConsoleConfig.EnabledOptions(), ",") allMetrics.Lapi = &models.LapiMetrics{ ConsoleOptions: models.ConsoleOptions{ - "FIXME", + consoleOptions, }, } + + osName, osVersion := detectOS() + allMetrics.Lapi.Os = &models.OSversion{ - Name: "FIXME", - Version: "FIXME", + Name: osName, + Version: osVersion, } allMetrics.Lapi.Version = ptr.Of(version.String()) allMetrics.Lapi.FeatureFlags = fflag.Crowdsec.GetEnabledFeatures() @@ -338,7 +367,6 @@ func (a *apic) SendUsageMetrics() { continue } - spew.Dump(metrics) _, _, err = a.apiClient.UsageMetrics.Add(context.Background(), metrics) if err != nil { From 16c7d59eb47760e6c083a837f672a6652dd1428e Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Thu, 20 Jun 2024 15:42:42 +0200 Subject: [PATCH 031/119] up --- cmd/crowdsec/lpmetrics.go | 10 ++-- pkg/apiserver/apic_metrics.go | 22 ++++---- pkg/apiserver/controllers/v1/usagemetrics.go | 2 +- pkg/database/bouncers.go | 6 +-- pkg/database/machines.go | 8 +-- pkg/models/base_metrics.go | 24 +++++---- pkg/models/localapi_swagger.yaml | 12 ++++- pkg/models/log_processors_metrics.go | 36 ++++++++----- 
pkg/models/metrics_meta.go | 55 ++++++++++++++++++-- pkg/models/o_sversion.go | 39 +++++++++++++- 10 files changed, 161 insertions(+), 53 deletions(-) diff --git a/cmd/crowdsec/lpmetrics.go b/cmd/crowdsec/lpmetrics.go index f32156e31b1..6067766946a 100644 --- a/cmd/crowdsec/lpmetrics.go +++ b/cmd/crowdsec/lpmetrics.go @@ -109,13 +109,13 @@ func NewMetricsProvider(apic *apiclient.ApiClient, interval time.Duration, logge func (m *MetricsProvider) metricsPayload() *models.AllMetrics { meta := &models.MetricsMeta{ - UtcStartupTimestamp: m.static.startupTS, - WindowSizeSeconds: int64(m.interval.Seconds()), + UtcStartupTimestamp: ptr.Of(m.static.startupTS), + WindowSizeSeconds: ptr.Of(int64(m.interval.Seconds())), } os := &models.OSversion{ - Name: m.static.osName, - Version: m.static.osVersion, + Name: ptr.Of(m.static.osName), + Version: ptr.Of(m.static.osVersion), } base := models.BaseMetrics{ @@ -152,7 +152,7 @@ func (m *MetricsProvider) Run(ctx context.Context, myTomb *tomb.Tomb) error { for { select { case <-ticker.C: - met.LogProcessors[0].Meta.UtcNowTimestamp = time.Now().Unix() + met.LogProcessors[0].Meta.UtcNowTimestamp = ptr.Of(time.Now().Unix()) ctxTime, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 1a855bd2348..1ad0f53145c 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -64,12 +64,12 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { rcMetrics := models.RemediationComponentsMetrics{} rcMetrics.Os = &models.OSversion{ - Name: bouncer.Osname, - Version: bouncer.Osversion, + Name: ptr.Of(bouncer.Osname), + Version: ptr.Of(bouncer.Osversion), } rcMetrics.Type = bouncer.Type rcMetrics.FeatureFlags = strings.Split(bouncer.Featureflags, ",") - rcMetrics.Version = &bouncer.Version + rcMetrics.Version = ptr.Of(bouncer.Version) rcMetrics.Name = bouncer.Name rcMetrics.LastPull = bouncer.LastPull.UTC().Unix() @@ 
-114,11 +114,11 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { lpMetrics := models.LogProcessorsMetrics{} lpMetrics.Os = &models.OSversion{ - Name: lp.Osname, - Version: lp.Osversion, + Name: ptr.Of(lp.Osname), + Version: ptr.Of(lp.Osversion), } lpMetrics.FeatureFlags = strings.Split(lp.Featureflags, ",") - lpMetrics.Version = &lp.Version + lpMetrics.Version = ptr.Of(lp.Version) lpMetrics.Name = lp.MachineId lpMetrics.LastPush = lp.LastPush.UTC().Unix() lpMetrics.LastUpdate = lp.UpdatedAt.UTC().Unix() @@ -173,16 +173,16 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { osName, osVersion := detectOS() allMetrics.Lapi.Os = &models.OSversion{ - Name: osName, - Version: osVersion, + Name: ptr.Of(osName), + Version: ptr.Of(osVersion), } allMetrics.Lapi.Version = ptr.Of(version.String()) allMetrics.Lapi.FeatureFlags = fflag.Crowdsec.GetEnabledFeatures() allMetrics.Lapi.Meta = &models.MetricsMeta{ - UtcStartupTimestamp: time.Now().UTC().Unix(), //FIXME: should be the actual startup time - UtcNowTimestamp: time.Now().UTC().Unix(), - WindowSizeSeconds: int64(a.metricsInterval.Seconds()), + UtcStartupTimestamp: ptr.Of(time.Now().UTC().Unix()), //FIXME: should be the actual startup time + UtcNowTimestamp: ptr.Of(time.Now().UTC().Unix()), + WindowSizeSeconds: ptr.Of(int64(a.metricsInterval.Seconds())), } allMetrics.Lapi.Metrics = make([]*models.MetricsDetailItem, 0) diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go index 120e6662fc5..b9ba23a1cce 100644 --- a/pkg/apiserver/controllers/v1/usagemetrics.go +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -121,7 +121,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { return } - collectedAt = time.Unix(baseMetrics.Meta.UtcNowTimestamp, 0).UTC() + collectedAt = time.Unix(*baseMetrics.Meta.UtcNowTimestamp, 0).UTC() jsonPayload, err := json.Marshal(payload) if err != nil { diff --git a/pkg/database/bouncers.go 
b/pkg/database/bouncers.go index c0a90914c75..372159180af 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -7,9 +7,9 @@ import ( "github.com/pkg/errors" - "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/crowdsecurity/crowdsec/pkg/models" ) func (c *Client) BouncerUpdateBaseMetrics(bouncerName string, bouncerType string, baseMetrics *models.BaseMetrics) error { @@ -22,8 +22,8 @@ func (c *Client) BouncerUpdateBaseMetrics(bouncerName string, bouncerType string Update(). Where(bouncer.NameEQ(bouncerName)). SetNillableVersion(baseMetrics.Version). - SetOsname(os.Name). - SetOsversion(os.Version). + SetOsname(*os.Name). + SetOsversion(*os.Version). SetFeatureflags(features). SetType(bouncerType). Save(c.CTX) diff --git a/pkg/database/machines.go b/pkg/database/machines.go index 6f6359699d5..28fb84c84c4 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -9,9 +9,9 @@ import ( "github.com/pkg/errors" "golang.org/x/crypto/bcrypt" - "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -22,14 +22,14 @@ func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics *models. os := baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") - heartbeat := time.Unix(baseMetrics.Meta.UtcNowTimestamp, 0) + heartbeat := time.Unix(*baseMetrics.Meta.UtcNowTimestamp, 0) _, err := c.Ent.Machine. Update(). Where(machine.MachineIdEQ(machineID)). SetNillableVersion(baseMetrics.Version). - SetOsname(os.Name). - SetOsversion(os.Version). + SetOsname(*os.Name). + SetOsversion(*os.Version). SetFeatureflags(features). SetLastHeartbeat(heartbeat). SetHubstate(hubItems). 
diff --git a/pkg/models/base_metrics.go b/pkg/models/base_metrics.go index 88ededa6b71..132ecf09c29 100644 --- a/pkg/models/base_metrics.go +++ b/pkg/models/base_metrics.go @@ -24,15 +24,13 @@ type BaseMetrics struct { FeatureFlags []string `json:"feature_flags"` // metrics meta - // Required: true - Meta *MetricsMeta `json:"meta"` + Meta *MetricsMeta `json:"meta,omitempty"` // metrics details Metrics []*MetricsDetailItem `json:"metrics"` // OS information - // Required: true - Os *OSversion `json:"os"` + Os *OSversion `json:"os,omitempty"` // version of the remediation component // Required: true @@ -66,9 +64,8 @@ func (m *BaseMetrics) Validate(formats strfmt.Registry) error { } func (m *BaseMetrics) validateMeta(formats strfmt.Registry) error { - - if err := validate.Required("meta", "body", m.Meta); err != nil { - return err + if swag.IsZero(m.Meta) { // not required + return nil } if m.Meta != nil { @@ -112,9 +109,8 @@ func (m *BaseMetrics) validateMetrics(formats strfmt.Registry) error { } func (m *BaseMetrics) validateOs(formats strfmt.Registry) error { - - if err := validate.Required("os", "body", m.Os); err != nil { - return err + if swag.IsZero(m.Os) { // not required + return nil } if m.Os != nil { @@ -166,6 +162,10 @@ func (m *BaseMetrics) contextValidateMeta(ctx context.Context, formats strfmt.Re if m.Meta != nil { + if swag.IsZero(m.Meta) { // not required + return nil + } + if err := m.Meta.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("meta") @@ -208,6 +208,10 @@ func (m *BaseMetrics) contextValidateOs(ctx context.Context, formats strfmt.Regi if m.Os != nil { + if swag.IsZero(m.Os) { // not required + return nil + } + if err := m.Os.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("os") diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index 05b7af4d7ee..3fa8a4b7b84 100644 --- 
a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -1061,6 +1061,9 @@ definitions: last_update: type: integer description: last update date + required: + - hub_items + - datasources LapiMetrics: title: LapiMetrics type: object @@ -1112,8 +1115,6 @@ definitions: description: feature flags (expected to be empty for remediation components) required: - version - - os - - meta OSversion: title: OSversion type: object @@ -1124,6 +1125,9 @@ definitions: version: type: string description: version of the OS + required: + - name + - version MetricsDetailItem: title: MetricsDetailItem type: object @@ -1157,6 +1161,10 @@ definitions: utc_now_timestamp: type: integer description: UTC timestamp of the current time + required: + - window_size_seconds + - utc_startup_timestamp + - utc_now_timestamp MetricsLabels: title: MetricsLabels type: object diff --git a/pkg/models/log_processors_metrics.go b/pkg/models/log_processors_metrics.go index 83bc1f4a118..05b688fb994 100644 --- a/pkg/models/log_processors_metrics.go +++ b/pkg/models/log_processors_metrics.go @@ -11,6 +11,7 @@ import ( "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" + "github.com/go-openapi/validate" ) // LogProcessorsMetrics LogProcessorsMetrics @@ -20,10 +21,12 @@ type LogProcessorsMetrics struct { BaseMetrics // Number of datasources per type - Datasources map[string]int64 `json:"datasources,omitempty"` + // Required: true + Datasources map[string]int64 `json:"datasources"` // hub items - HubItems HubItems `json:"hub_items,omitempty"` + // Required: true + HubItems HubItems `json:"hub_items"` // last push date LastPush int64 `json:"last_push,omitempty"` @@ -46,9 +49,9 @@ func (m *LogProcessorsMetrics) UnmarshalJSON(raw []byte) error { // AO1 var dataAO1 struct { - Datasources map[string]int64 `json:"datasources,omitempty"` + Datasources map[string]int64 `json:"datasources"` - HubItems HubItems `json:"hub_items,omitempty"` + HubItems HubItems 
`json:"hub_items"` LastPush int64 `json:"last_push,omitempty"` @@ -83,9 +86,9 @@ func (m LogProcessorsMetrics) MarshalJSON() ([]byte, error) { } _parts = append(_parts, aO0) var dataAO1 struct { - Datasources map[string]int64 `json:"datasources,omitempty"` + Datasources map[string]int64 `json:"datasources"` - HubItems HubItems `json:"hub_items,omitempty"` + HubItems HubItems `json:"hub_items"` LastPush int64 `json:"last_push,omitempty"` @@ -121,6 +124,10 @@ func (m *LogProcessorsMetrics) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validateDatasources(formats); err != nil { + res = append(res, err) + } + if err := m.validateHubItems(formats); err != nil { res = append(res, err) } @@ -131,10 +138,19 @@ func (m *LogProcessorsMetrics) Validate(formats strfmt.Registry) error { return nil } +func (m *LogProcessorsMetrics) validateDatasources(formats strfmt.Registry) error { + + if err := validate.Required("datasources", "body", m.Datasources); err != nil { + return err + } + + return nil +} + func (m *LogProcessorsMetrics) validateHubItems(formats strfmt.Registry) error { - if swag.IsZero(m.HubItems) { // not required - return nil + if err := validate.Required("hub_items", "body", m.HubItems); err != nil { + return err } if m.HubItems != nil { @@ -172,10 +188,6 @@ func (m *LogProcessorsMetrics) ContextValidate(ctx context.Context, formats strf func (m *LogProcessorsMetrics) contextValidateHubItems(ctx context.Context, formats strfmt.Registry) error { - if swag.IsZero(m.HubItems) { // not required - return nil - } - if err := m.HubItems.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("hub_items") diff --git a/pkg/models/metrics_meta.go b/pkg/models/metrics_meta.go index 30bccca0e7c..faa5a975b2c 100644 --- a/pkg/models/metrics_meta.go +++ b/pkg/models/metrics_meta.go @@ -8,8 +8,10 @@ package models import ( "context" + "github.com/go-openapi/errors" 
"github.com/go-openapi/strfmt" "github.com/go-openapi/swag" + "github.com/go-openapi/validate" ) // MetricsMeta MetricsMeta @@ -18,17 +20,64 @@ import ( type MetricsMeta struct { // UTC timestamp of the current time - UtcNowTimestamp int64 `json:"utc_now_timestamp,omitempty"` + // Required: true + UtcNowTimestamp *int64 `json:"utc_now_timestamp"` // UTC timestamp of the startup of the software - UtcStartupTimestamp int64 `json:"utc_startup_timestamp,omitempty"` + // Required: true + UtcStartupTimestamp *int64 `json:"utc_startup_timestamp"` // Size, in seconds, of the window used to compute the metric - WindowSizeSeconds int64 `json:"window_size_seconds,omitempty"` + // Required: true + WindowSizeSeconds *int64 `json:"window_size_seconds"` } // Validate validates this metrics meta func (m *MetricsMeta) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateUtcNowTimestamp(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUtcStartupTimestamp(formats); err != nil { + res = append(res, err) + } + + if err := m.validateWindowSizeSeconds(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *MetricsMeta) validateUtcNowTimestamp(formats strfmt.Registry) error { + + if err := validate.Required("utc_now_timestamp", "body", m.UtcNowTimestamp); err != nil { + return err + } + + return nil +} + +func (m *MetricsMeta) validateUtcStartupTimestamp(formats strfmt.Registry) error { + + if err := validate.Required("utc_startup_timestamp", "body", m.UtcStartupTimestamp); err != nil { + return err + } + + return nil +} + +func (m *MetricsMeta) validateWindowSizeSeconds(formats strfmt.Registry) error { + + if err := validate.Required("window_size_seconds", "body", m.WindowSizeSeconds); err != nil { + return err + } + return nil } diff --git a/pkg/models/o_sversion.go b/pkg/models/o_sversion.go index ae0a85f859e..eb670409c90 100644 --- a/pkg/models/o_sversion.go +++ b/pkg/models/o_sversion.go @@ -8,8 +8,10 @@ package models import ( "context" + "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" + "github.com/go-openapi/validate" ) // OSversion OSversion @@ -18,14 +20,47 @@ import ( type OSversion struct { // name of the OS - Name string `json:"name,omitempty"` + // Required: true + Name *string `json:"name"` // version of the OS - Version string `json:"version,omitempty"` + // Required: true + Version *string `json:"version"` } // Validate validates this o sversion func (m *OSversion) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVersion(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *OSversion) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *OSversion) validateVersion(formats strfmt.Registry) error { + + if err := validate.Required("version", "body", m.Version); err != nil { + return err + } + return nil } From 0bfbd3e2bcd7f0aa1f864f4dec05562f7e99d63a Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Tue, 25 Jun 2024 10:20:21 +0200 Subject: [PATCH 032/119] up --- cmd/crowdsec/lpmetrics.go | 30 ++-- pkg/apiserver/apic_metrics.go | 48 ++--- pkg/apiserver/controllers/v1/usagemetrics.go | 9 +- pkg/database/machines.go | 3 +- pkg/models/base_metrics.go | 70 ++------ pkg/models/detailed_metrics.go | 173 +++++++++++++++++++ pkg/models/localapi_swagger.yaml | 31 ++-- pkg/models/metrics_meta.go | 17 -- 8 files changed, 253 insertions(+), 128 deletions(-) create mode 100644 pkg/models/detailed_metrics.go diff --git a/cmd/crowdsec/lpmetrics.go b/cmd/crowdsec/lpmetrics.go index 6067766946a..5b650932bbf 100644 --- a/cmd/crowdsec/lpmetrics.go +++ b/cmd/crowdsec/lpmetrics.go @@ -76,7 +76,7 @@ func newStaticMetrics(consoleOptions []string, datasources []acquisition.DataSou return staticMetrics{ osName: osName, osVersion: osVersion, - startupTS: time.Now().Unix(), + startupTS: time.Now().UTC().Unix(), featureFlags: fflag.Crowdsec.GetEnabledFeatures(), consoleOptions: consoleOptions, datasourceMap: datasourceMap, @@ -108,21 +108,17 @@ func NewMetricsProvider(apic *apiclient.ApiClient, interval time.Duration, logge } func (m *MetricsProvider) metricsPayload() *models.AllMetrics { - meta := &models.MetricsMeta{ - UtcStartupTimestamp: ptr.Of(m.static.startupTS), - WindowSizeSeconds: ptr.Of(int64(m.interval.Seconds())), - } - os := &models.OSversion{ Name: ptr.Of(m.static.osName), Version: ptr.Of(m.static.osVersion), } base := models.BaseMetrics{ - Meta: meta, - Os: os, - Version: ptr.Of(version.String()), - 
FeatureFlags: m.static.featureFlags, + UtcStartupTimestamp: ptr.Of(m.static.startupTS), + Os: os, + Version: ptr.Of(version.String()), + FeatureFlags: m.static.featureFlags, + Metrics: make([]*models.DetailedMetrics, 0), } met := &models.LogProcessorsMetrics{ @@ -131,6 +127,14 @@ func (m *MetricsProvider) metricsPayload() *models.AllMetrics { HubItems: m.static.hubState, } + met.Metrics = append(met.Metrics, &models.DetailedMetrics{ + Meta: &models.MetricsMeta{ + UtcNowTimestamp: ptr.Of(time.Now().Unix()), + WindowSizeSeconds: ptr.Of(int64(m.interval.Seconds())), + }, + Items: make([]*models.MetricsDetailItem, 0), + }) + // TODO: more metric details... ? return &models.AllMetrics{ @@ -152,7 +156,7 @@ func (m *MetricsProvider) Run(ctx context.Context, myTomb *tomb.Tomb) error { for { select { case <-ticker.C: - met.LogProcessors[0].Meta.UtcNowTimestamp = ptr.Of(time.Now().Unix()) + //.UtcNowTimestamp = ptr.Of(time.Now().Unix()) ctxTime, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() @@ -161,17 +165,21 @@ func (m *MetricsProvider) Run(ctx context.Context, myTomb *tomb.Tomb) error { switch { case errors.Is(err, context.DeadlineExceeded): m.logger.Warnf("timeout sending lp metrics") + ticker.Reset(m.interval) continue case err != nil && resp != nil && resp.Response.StatusCode == http.StatusNotFound: m.logger.Warnf("metrics endpoint not found, older LAPI?") + ticker.Reset(m.interval) continue case err != nil: m.logger.Warnf("failed to send lp metrics: %s", err) + ticker.Reset(m.interval) continue } if resp.Response.StatusCode != http.StatusCreated { m.logger.Warnf("failed to send lp metrics: %s", resp.Response.Status) + ticker.Reset(m.interval) continue } diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 1ad0f53145c..51eba39b328 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -21,9 +21,8 @@ import ( ) type dbPayload struct { - Metrics *models.MetricsDetailItem `json:"metrics"` - Meta 
*models.MetricsMeta `json:"meta"` - Datasources map[string]int64 `json:"datasources"` + Metrics *models.DetailedMetrics `json:"metrics"` + Datasources map[string]int64 `json:"datasources"` } func detectOS() (string, string) { @@ -73,8 +72,7 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { rcMetrics.Name = bouncer.Name rcMetrics.LastPull = bouncer.LastPull.UTC().Unix() - rcMetrics.Metrics = make([]*models.MetricsDetailItem, 0) - rcMetrics.Meta = &models.MetricsMeta{} + rcMetrics.Metrics = make([]*models.DetailedMetrics, 0) //Might seem weird, but we duplicate the bouncers if we have multiple unsent metrics for _, dbMetric := range dbMetrics { @@ -89,18 +87,10 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { continue } - rcMetrics.Meta.UtcStartupTimestamp = dbPayload.Meta.UtcStartupTimestamp - rcMetrics.Meta.UtcNowTimestamp = dbPayload.Meta.UtcNowTimestamp - rcMetrics.Meta.WindowSizeSeconds = dbPayload.Meta.WindowSizeSeconds - rcMetrics.Metrics = append(rcMetrics.Metrics, dbPayload.Metrics) - allMetrics.RemediationComponents = append(allMetrics.RemediationComponents, &rcMetrics) } - //If we have no metrics, we still want to send the bouncer - if len(rcMetrics.Metrics) == 0 { - allMetrics.RemediationComponents = append(allMetrics.RemediationComponents, &rcMetrics) - } + allMetrics.RemediationComponents = append(allMetrics.RemediationComponents, &rcMetrics) } for _, lp := range lps { @@ -129,8 +119,7 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { lpMetrics.HubItems = *lp.Hubstate } - lpMetrics.Metrics = make([]*models.MetricsDetailItem, 0) - lpMetrics.Meta = &models.MetricsMeta{} + lpMetrics.Metrics = make([]*models.DetailedMetrics, 0) for _, dbMetric := range dbMetrics { @@ -144,22 +133,14 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { continue } - lpMetrics.Meta.UtcStartupTimestamp = dbPayload.Meta.UtcStartupTimestamp - lpMetrics.Meta.UtcNowTimestamp = 
dbPayload.Meta.UtcNowTimestamp - lpMetrics.Meta.WindowSizeSeconds = dbPayload.Meta.WindowSizeSeconds + lpMetrics.Metrics = append(lpMetrics.Metrics, dbPayload.Metrics) for k, v := range dbPayload.Datasources { lpMetrics.Datasources[k] = v } - - lpMetrics.Metrics = append(lpMetrics.Metrics, dbPayload.Metrics) - allMetrics.LogProcessors = append(allMetrics.LogProcessors, &lpMetrics) } - //If we have no metrics, we still want to send the LP - if len(lpMetrics.Metrics) == 0 { - allMetrics.LogProcessors = append(allMetrics.LogProcessors, &lpMetrics) - } + allMetrics.LogProcessors = append(allMetrics.LogProcessors, &lpMetrics) } //FIXME: all of this should only be done once on startup/reload @@ -179,12 +160,15 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { allMetrics.Lapi.Version = ptr.Of(version.String()) allMetrics.Lapi.FeatureFlags = fflag.Crowdsec.GetEnabledFeatures() - allMetrics.Lapi.Meta = &models.MetricsMeta{ - UtcStartupTimestamp: ptr.Of(time.Now().UTC().Unix()), //FIXME: should be the actual startup time - UtcNowTimestamp: ptr.Of(time.Now().UTC().Unix()), - WindowSizeSeconds: ptr.Of(int64(a.metricsInterval.Seconds())), - } - allMetrics.Lapi.Metrics = make([]*models.MetricsDetailItem, 0) + allMetrics.Lapi.Metrics = make([]*models.DetailedMetrics, 0) + + allMetrics.Lapi.Metrics = append(allMetrics.Lapi.Metrics, &models.DetailedMetrics{ + Meta: &models.MetricsMeta{ + UtcNowTimestamp: ptr.Of(time.Now().UTC().Unix()), + WindowSizeSeconds: ptr.Of(int64(a.metricsInterval.Seconds())), + }, + Items: make([]*models.MetricsDetailItem, 0), + }) //Force an actual slice to avoid non existing fields in the json if allMetrics.RemediationComponents == nil { diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go index b9ba23a1cce..04f53bea028 100644 --- a/pkg/apiserver/controllers/v1/usagemetrics.go +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -87,7 +87,6 @@ func (c *Controller) 
UsageMetrics(gctx *gin.Context) { payload = map[string]any{ "datasources": item0.Datasources, "metrics": item0.Metrics, - "meta": item0.Meta, } baseMetrics = item0.BaseMetrics hubItems = item0.HubItems @@ -106,7 +105,6 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { payload = map[string]any{ "type": item0.Type, "metrics": item0.Metrics, - "meta": item0.Meta, } baseMetrics = item0.BaseMetrics default: @@ -121,7 +119,12 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { return } - collectedAt = time.Unix(*baseMetrics.Meta.UtcNowTimestamp, 0).UTC() + if baseMetrics.Metrics != nil && len(baseMetrics.Metrics) > 0 { + collectedAt = time.Unix(*baseMetrics.Metrics[0].Meta.UtcNowTimestamp, 0).UTC() + } else { + // if there's no timestamp, use the current time + collectedAt = time.Now().UTC() + } jsonPayload, err := json.Marshal(payload) if err != nil { diff --git a/pkg/database/machines.go b/pkg/database/machines.go index 28fb84c84c4..c2ccd037040 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -22,7 +22,8 @@ func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics *models. os := baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") - heartbeat := time.Unix(*baseMetrics.Meta.UtcNowTimestamp, 0) + //FIXME: nil deref + heartbeat := time.Unix(*baseMetrics.Metrics[0].Meta.UtcNowTimestamp, 0) _, err := c.Ent.Machine. Update(). 
diff --git a/pkg/models/base_metrics.go b/pkg/models/base_metrics.go index 132ecf09c29..154d9004afe 100644 --- a/pkg/models/base_metrics.go +++ b/pkg/models/base_metrics.go @@ -23,15 +23,16 @@ type BaseMetrics struct { // feature flags (expected to be empty for remediation components) FeatureFlags []string `json:"feature_flags"` - // metrics meta - Meta *MetricsMeta `json:"meta,omitempty"` - // metrics details - Metrics []*MetricsDetailItem `json:"metrics"` + Metrics []*DetailedMetrics `json:"metrics"` - // OS information + // os Os *OSversion `json:"os,omitempty"` + // UTC timestamp of the startup of the software + // Required: true + UtcStartupTimestamp *int64 `json:"utc_startup_timestamp"` + // version of the remediation component // Required: true Version *string `json:"version"` @@ -41,15 +42,15 @@ type BaseMetrics struct { func (m *BaseMetrics) Validate(formats strfmt.Registry) error { var res []error - if err := m.validateMeta(formats); err != nil { + if err := m.validateMetrics(formats); err != nil { res = append(res, err) } - if err := m.validateMetrics(formats); err != nil { + if err := m.validateOs(formats); err != nil { res = append(res, err) } - if err := m.validateOs(formats); err != nil { + if err := m.validateUtcStartupTimestamp(formats); err != nil { res = append(res, err) } @@ -63,25 +64,6 @@ func (m *BaseMetrics) Validate(formats strfmt.Registry) error { return nil } -func (m *BaseMetrics) validateMeta(formats strfmt.Registry) error { - if swag.IsZero(m.Meta) { // not required - return nil - } - - if m.Meta != nil { - if err := m.Meta.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("meta") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("meta") - } - return err - } - } - - return nil -} - func (m *BaseMetrics) validateMetrics(formats strfmt.Registry) error { if swag.IsZero(m.Metrics) { // not required return nil @@ -127,6 +109,15 @@ func (m *BaseMetrics) 
validateOs(formats strfmt.Registry) error { return nil } +func (m *BaseMetrics) validateUtcStartupTimestamp(formats strfmt.Registry) error { + + if err := validate.Required("utc_startup_timestamp", "body", m.UtcStartupTimestamp); err != nil { + return err + } + + return nil +} + func (m *BaseMetrics) validateVersion(formats strfmt.Registry) error { if err := validate.Required("version", "body", m.Version); err != nil { @@ -140,10 +131,6 @@ func (m *BaseMetrics) validateVersion(formats strfmt.Registry) error { func (m *BaseMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { var res []error - if err := m.contextValidateMeta(ctx, formats); err != nil { - res = append(res, err) - } - if err := m.contextValidateMetrics(ctx, formats); err != nil { res = append(res, err) } @@ -158,27 +145,6 @@ func (m *BaseMetrics) ContextValidate(ctx context.Context, formats strfmt.Regist return nil } -func (m *BaseMetrics) contextValidateMeta(ctx context.Context, formats strfmt.Registry) error { - - if m.Meta != nil { - - if swag.IsZero(m.Meta) { // not required - return nil - } - - if err := m.Meta.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("meta") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("meta") - } - return err - } - } - - return nil -} - func (m *BaseMetrics) contextValidateMetrics(ctx context.Context, formats strfmt.Registry) error { for i := 0; i < len(m.Metrics); i++ { diff --git a/pkg/models/detailed_metrics.go b/pkg/models/detailed_metrics.go new file mode 100644 index 00000000000..9e605ed8c88 --- /dev/null +++ b/pkg/models/detailed_metrics.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// DetailedMetrics DetailedMetrics +// +// swagger:model DetailedMetrics +type DetailedMetrics struct { + + // items + // Required: true + Items []*MetricsDetailItem `json:"items"` + + // meta + // Required: true + Meta *MetricsMeta `json:"meta"` +} + +// Validate validates this detailed metrics +func (m *DetailedMetrics) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateItems(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMeta(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *DetailedMetrics) validateItems(formats strfmt.Registry) error { + + if err := validate.Required("items", "body", m.Items); err != nil { + return err + } + + for i := 0; i < len(m.Items); i++ { + if swag.IsZero(m.Items[i]) { // not required + continue + } + + if m.Items[i] != nil { + if err := m.Items[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("items" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("items" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *DetailedMetrics) validateMeta(formats strfmt.Registry) error { + + if err := validate.Required("meta", "body", m.Meta); err != nil { + return err + } + + if m.Meta != nil { + if err := m.Meta.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("meta") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("meta") + } + return err + } + } + + return nil +} + +// ContextValidate validate this detailed metrics based on the context it is used +func (m *DetailedMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateItems(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateMeta(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *DetailedMetrics) contextValidateItems(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Items); i++ { + + if m.Items[i] != nil { + + if swag.IsZero(m.Items[i]) { // not required + return nil + } + + if err := m.Items[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("items" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("items" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *DetailedMetrics) contextValidateMeta(ctx context.Context, formats strfmt.Registry) error { + + if m.Meta != nil { + + if err := m.Meta.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("meta") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("meta") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *DetailedMetrics) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DetailedMetrics) UnmarshalBinary(b []byte) error { + var res DetailedMetrics + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index 3fa8a4b7b84..7b3bc023a5d 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -689,7 +689,7 @@ paths: description: Post usage metrics from a LP or a bouncer summary: Send usage metrics tags: - - bouncers + - Remediation component - watchers operationId: usage-metrics produces: @@ -1095,26 +1095,24 @@ definitions: version: type: string description: version of the remediation component - meta: - type: object - $ref: '#/definitions/MetricsMeta' - description: metrics meta os: - type: object $ref: '#/definitions/OSversion' - description: OS information metrics: type: array items: - $ref: '#/definitions/MetricsDetailItem' + $ref: '#/definitions/DetailedMetrics' description: metrics details feature_flags: type: array items: type: string description: feature flags (expected to be empty for remediation components) + utc_startup_timestamp: + type: integer + description: UTC timestamp of the startup of the software required: - version + - utc_startup_timestamp OSversion: title: OSversion type: object 
@@ -1128,6 +1126,19 @@ definitions: required: - name - version + DetailedMetrics: + type: object + title: DetailedMetrics + properties: + items: + type: array + items: + $ref: '#/definitions/MetricsDetailItem' + meta: + $ref: '#/definitions/MetricsMeta' + required: + - meta + - items MetricsDetailItem: title: MetricsDetailItem type: object @@ -1155,15 +1166,11 @@ definitions: window_size_seconds: type: integer description: Size, in seconds, of the window used to compute the metric - utc_startup_timestamp: - type: integer - description: UTC timestamp of the startup of the software utc_now_timestamp: type: integer description: UTC timestamp of the current time required: - window_size_seconds - - utc_startup_timestamp - utc_now_timestamp MetricsLabels: title: MetricsLabels diff --git a/pkg/models/metrics_meta.go b/pkg/models/metrics_meta.go index faa5a975b2c..b021617e4d9 100644 --- a/pkg/models/metrics_meta.go +++ b/pkg/models/metrics_meta.go @@ -23,10 +23,6 @@ type MetricsMeta struct { // Required: true UtcNowTimestamp *int64 `json:"utc_now_timestamp"` - // UTC timestamp of the startup of the software - // Required: true - UtcStartupTimestamp *int64 `json:"utc_startup_timestamp"` - // Size, in seconds, of the window used to compute the metric // Required: true WindowSizeSeconds *int64 `json:"window_size_seconds"` @@ -40,10 +36,6 @@ func (m *MetricsMeta) Validate(formats strfmt.Registry) error { res = append(res, err) } - if err := m.validateUtcStartupTimestamp(formats); err != nil { - res = append(res, err) - } - if err := m.validateWindowSizeSeconds(formats); err != nil { res = append(res, err) } @@ -63,15 +55,6 @@ func (m *MetricsMeta) validateUtcNowTimestamp(formats strfmt.Registry) error { return nil } -func (m *MetricsMeta) validateUtcStartupTimestamp(formats strfmt.Registry) error { - - if err := validate.Required("utc_startup_timestamp", "body", m.UtcStartupTimestamp); err != nil { - return err - } - - return nil -} - func (m *MetricsMeta) 
validateWindowSizeSeconds(formats strfmt.Registry) error { if err := validate.Required("window_size_seconds", "body", m.WindowSizeSeconds); err != nil { From df43eaf7e60fda4b6cbde401076d355036d43fa4 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Tue, 25 Jun 2024 13:43:18 +0200 Subject: [PATCH 033/119] up --- pkg/apiserver/apic_metrics.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 51eba39b328..70d0d56f014 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -21,8 +21,8 @@ import ( ) type dbPayload struct { - Metrics *models.DetailedMetrics `json:"metrics"` - Datasources map[string]int64 `json:"datasources"` + Metrics []*models.DetailedMetrics `json:"metrics"` + Datasources map[string]int64 `json:"datasources"` } func detectOS() (string, string) { @@ -87,7 +87,7 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { continue } - rcMetrics.Metrics = append(rcMetrics.Metrics, dbPayload.Metrics) + rcMetrics.Metrics = append(rcMetrics.Metrics, dbPayload.Metrics...) } allMetrics.RemediationComponents = append(allMetrics.RemediationComponents, &rcMetrics) @@ -133,7 +133,7 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { continue } - lpMetrics.Metrics = append(lpMetrics.Metrics, dbPayload.Metrics) + lpMetrics.Metrics = append(lpMetrics.Metrics, dbPayload.Metrics...) 
for k, v := range dbPayload.Datasources { lpMetrics.Datasources[k] = v From 727b2e3080a5b75ca2b8a94672e1ce607ae56139 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Tue, 25 Jun 2024 13:59:17 +0200 Subject: [PATCH 034/119] remove feature flag --- pkg/apiserver/apiserver.go | 14 ++++---------- pkg/fflag/crowdsec.go | 6 ------ 2 files changed, 4 insertions(+), 16 deletions(-) diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index b51a13d0e9e..3936bad06c6 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -25,7 +25,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/database" - "github.com/crowdsecurity/crowdsec/pkg/fflag" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -367,15 +366,10 @@ func (s *APIServer) Run(apiReady chan bool) error { return nil }) - if fflag.CAPIUsageMetrics.IsEnabled() { - log.Infof("CAPI_USAGE_METRICS flag is enabled, starting usage metrics routine") - s.apic.metricsTomb.Go(func() error { - //staticMetrics := NewStaticMetrics(consoleOptions, datasources, hub) - - s.apic.SendUsageMetrics() - return nil - }) - } + s.apic.metricsTomb.Go(func() error { + s.apic.SendUsageMetrics() + return nil + }) } diff --git a/pkg/fflag/crowdsec.go b/pkg/fflag/crowdsec.go index e5c80a19be2..d42d6a05ef6 100644 --- a/pkg/fflag/crowdsec.go +++ b/pkg/fflag/crowdsec.go @@ -8,7 +8,6 @@ var ChunkedDecisionsStream = &Feature{Name: "chunked_decisions_stream", Descript var PapiClient = &Feature{Name: "papi_client", Description: "Enable Polling API client", State: DeprecatedState} var Re2GrokSupport = &Feature{Name: "re2_grok_support", Description: "Enable RE2 support for GROK patterns"} var Re2RegexpInfileSupport = &Feature{Name: "re2_regexp_in_file_support", Description: "Enable RE2 support for RegexpInFile expr helper"} -var CAPIUsageMetrics = &Feature{Name: "capi_usage_metrics", Description: "Enable usage metrics push to 
CAPI"} func RegisterAllFeatures() error { err := Crowdsec.RegisterFeature(CscliSetup) @@ -41,10 +40,5 @@ func RegisterAllFeatures() error { return err } - err = Crowdsec.RegisterFeature(CAPIUsageMetrics) - if err != nil { - return err - } - return nil } From 1cb56756a4653ebce61f9a8faa864643dfcb718d Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Wed, 26 Jun 2024 11:10:21 +0200 Subject: [PATCH 035/119] mark metrics as sent on 422 --- pkg/apiserver/apic_metrics.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 70d0d56f014..73993c22e27 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -3,6 +3,7 @@ package apiserver import ( "context" "encoding/json" + "net/http" "strings" "time" @@ -351,11 +352,14 @@ func (a *apic) SendUsageMetrics() { continue } - _, _, err = a.apiClient.UsageMetrics.Add(context.Background(), metrics) + _, resp, err := a.apiClient.UsageMetrics.Add(context.Background(), metrics) if err != nil { log.Errorf("unable to send usage metrics: %s", err) - continue + if resp.Response.StatusCode >= http.StatusBadRequest && resp.Response.StatusCode != http.StatusUnprocessableEntity { + //In case of 422, mark the metrics as sent anyway, the API did not like what we sent, and it's unlikely we'll be able to fix it + continue + } } err = a.MarkUsageMetricsAsSent(metricsId) if err != nil { From 95b10823bce6d4f425b7b4d334cac42ff9b8c0ef Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Wed, 26 Jun 2024 14:30:42 +0200 Subject: [PATCH 036/119] always send datasources --- pkg/apiserver/apic_metrics.go | 9 +-- pkg/apiserver/controllers/v1/usagemetrics.go | 11 +-- pkg/database/ent/machine.go | 15 +++- pkg/database/ent/machine/machine.go | 3 + pkg/database/ent/machine/where.go | 10 +++ pkg/database/ent/machine_create.go | 10 +++ pkg/database/ent/machine_update.go | 36 ++++++++++ pkg/database/ent/migrate/schema.go | 1 + 
pkg/database/ent/mutation.go | 75 +++++++++++++++++++- pkg/database/ent/schema/machine.go | 1 + pkg/database/machines.go | 3 +- 11 files changed, 159 insertions(+), 15 deletions(-) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 73993c22e27..82ae6e8db6b 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -22,8 +22,7 @@ import ( ) type dbPayload struct { - Metrics []*models.DetailedMetrics `json:"metrics"` - Datasources map[string]int64 `json:"datasources"` + Metrics []*models.DetailedMetrics `json:"metrics"` } func detectOS() (string, string) { @@ -114,7 +113,7 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { lpMetrics.LastPush = lp.LastPush.UTC().Unix() lpMetrics.LastUpdate = lp.UpdatedAt.UTC().Unix() - lpMetrics.Datasources = make(map[string]int64) + lpMetrics.Datasources = lp.Datasources if lp.Hubstate != nil { lpMetrics.HubItems = *lp.Hubstate @@ -135,10 +134,6 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { } lpMetrics.Metrics = append(lpMetrics.Metrics, dbPayload.Metrics...) 
- - for k, v := range dbPayload.Datasources { - lpMetrics.Datasources[k] = v - } } allMetrics.LogProcessors = append(allMetrics.LogProcessors, &lpMetrics) diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go index 04f53bea028..cc0134c4188 100644 --- a/pkg/apiserver/controllers/v1/usagemetrics.go +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -16,10 +16,10 @@ import ( ) // updateBaseMetrics updates the base metrics for a machine or bouncer -func (c *Controller) updateBaseMetrics(machineID string, bouncer *ent.Bouncer, baseMetrics *models.BaseMetrics, hubItems *models.HubItems) error { +func (c *Controller) updateBaseMetrics(machineID string, bouncer *ent.Bouncer, baseMetrics *models.BaseMetrics, hubItems *models.HubItems, datasources map[string]int64) error { switch { case machineID != "": - c.DBClient.MachineUpdateBaseMetrics(machineID, baseMetrics, hubItems) + c.DBClient.MachineUpdateBaseMetrics(machineID, baseMetrics, hubItems, datasources) case bouncer != nil: c.DBClient.BouncerUpdateBaseMetrics(bouncer.Name, bouncer.Type, baseMetrics) default: @@ -75,6 +75,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { payload map[string]any baseMetrics models.BaseMetrics hubItems models.HubItems + datasources map[string]int64 ) switch len(input.LogProcessors) { @@ -85,11 +86,11 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { // guaranteed by the swagger schema item0 := input.LogProcessors[0] payload = map[string]any{ - "datasources": item0.Datasources, - "metrics": item0.Metrics, + "metrics": item0.Metrics, } baseMetrics = item0.BaseMetrics hubItems = item0.HubItems + datasources = item0.Datasources default: log.Errorf("Payload has more than one log processor") // this is not checked in the swagger schema @@ -112,7 +113,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { return } - err := c.updateBaseMetrics(machineID, bouncer, &baseMetrics, &hubItems) + err := 
c.updateBaseMetrics(machineID, bouncer, &baseMetrics, &hubItems, datasources) if err != nil { log.Errorf("Failed to update base metrics: %s", err) c.HandleDBErrors(gctx, err) diff --git a/pkg/database/ent/machine.go b/pkg/database/ent/machine.go index f2cc45c6abc..67fe0702ce2 100644 --- a/pkg/database/ent/machine.go +++ b/pkg/database/ent/machine.go @@ -51,6 +51,8 @@ type Machine struct { Featureflags string `json:"featureflags,omitempty"` // Hubstate holds the value of the "hubstate" field. Hubstate *models.HubItems `json:"hubstate,omitempty"` + // Datasources holds the value of the "datasources" field. + Datasources map[string]int64 `json:"datasources,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the MachineQuery when eager-loading is set. Edges MachineEdges `json:"edges"` @@ -80,7 +82,7 @@ func (*Machine) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { - case machine.FieldHubstate: + case machine.FieldHubstate, machine.FieldDatasources: values[i] = new([]byte) case machine.FieldIsValidated: values[i] = new(sql.NullBool) @@ -211,6 +213,14 @@ func (m *Machine) assignValues(columns []string, values []any) error { return fmt.Errorf("unmarshal field hubstate: %w", err) } } + case machine.FieldDatasources: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field datasources", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &m.Datasources); err != nil { + return fmt.Errorf("unmarshal field datasources: %w", err) + } + } default: m.selectValues.Set(columns[i], values[i]) } @@ -302,6 +312,9 @@ func (m *Machine) String() string { builder.WriteString(", ") builder.WriteString("hubstate=") builder.WriteString(fmt.Sprintf("%v", m.Hubstate)) + builder.WriteString(", ") + builder.WriteString("datasources=") + builder.WriteString(fmt.Sprintf("%v", 
m.Datasources)) builder.WriteByte(')') return builder.String() } diff --git a/pkg/database/ent/machine/machine.go b/pkg/database/ent/machine/machine.go index 614050ca908..2a054d0c873 100644 --- a/pkg/database/ent/machine/machine.go +++ b/pkg/database/ent/machine/machine.go @@ -46,6 +46,8 @@ const ( FieldFeatureflags = "featureflags" // FieldHubstate holds the string denoting the hubstate field in the database. FieldHubstate = "hubstate" + // FieldDatasources holds the string denoting the datasources field in the database. + FieldDatasources = "datasources" // EdgeAlerts holds the string denoting the alerts edge name in mutations. EdgeAlerts = "alerts" // Table holds the table name of the machine in the database. @@ -78,6 +80,7 @@ var Columns = []string{ FieldOsversion, FieldFeatureflags, FieldHubstate, + FieldDatasources, } // ValidColumn reports if the column name is valid (part of the table columns). diff --git a/pkg/database/ent/machine/where.go b/pkg/database/ent/machine/where.go index c84ad30f240..aca66135f5c 100644 --- a/pkg/database/ent/machine/where.go +++ b/pkg/database/ent/machine/where.go @@ -1040,6 +1040,16 @@ func HubstateNotNil() predicate.Machine { return predicate.Machine(sql.FieldNotNull(FieldHubstate)) } +// DatasourcesIsNil applies the IsNil predicate on the "datasources" field. +func DatasourcesIsNil() predicate.Machine { + return predicate.Machine(sql.FieldIsNull(FieldDatasources)) +} + +// DatasourcesNotNil applies the NotNil predicate on the "datasources" field. +func DatasourcesNotNil() predicate.Machine { + return predicate.Machine(sql.FieldNotNull(FieldDatasources)) +} + // HasAlerts applies the HasEdge predicate on the "alerts" edge. 
func HasAlerts() predicate.Machine { return predicate.Machine(func(s *sql.Selector) { diff --git a/pkg/database/ent/machine_create.go b/pkg/database/ent/machine_create.go index d6bd53a3d91..95e4c9f6ae6 100644 --- a/pkg/database/ent/machine_create.go +++ b/pkg/database/ent/machine_create.go @@ -214,6 +214,12 @@ func (mc *MachineCreate) SetHubstate(mi *models.HubItems) *MachineCreate { return mc } +// SetDatasources sets the "datasources" field. +func (mc *MachineCreate) SetDatasources(m map[string]int64) *MachineCreate { + mc.mutation.SetDatasources(m) + return mc +} + // AddAlertIDs adds the "alerts" edge to the Alert entity by IDs. func (mc *MachineCreate) AddAlertIDs(ids ...int) *MachineCreate { mc.mutation.AddAlertIDs(ids...) @@ -408,6 +414,10 @@ func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) { _spec.SetField(machine.FieldHubstate, field.TypeJSON, value) _node.Hubstate = value } + if value, ok := mc.mutation.Datasources(); ok { + _spec.SetField(machine.FieldDatasources, field.TypeJSON, value) + _node.Datasources = value + } if nodes := mc.mutation.AlertsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, diff --git a/pkg/database/ent/machine_update.go b/pkg/database/ent/machine_update.go index 6824ab10aaa..01cf62bda08 100644 --- a/pkg/database/ent/machine_update.go +++ b/pkg/database/ent/machine_update.go @@ -264,6 +264,18 @@ func (mu *MachineUpdate) ClearHubstate() *MachineUpdate { return mu } +// SetDatasources sets the "datasources" field. +func (mu *MachineUpdate) SetDatasources(m map[string]int64) *MachineUpdate { + mu.mutation.SetDatasources(m) + return mu +} + +// ClearDatasources clears the value of the "datasources" field. +func (mu *MachineUpdate) ClearDatasources() *MachineUpdate { + mu.mutation.ClearDatasources() + return mu +} + // AddAlertIDs adds the "alerts" edge to the Alert entity by IDs. func (mu *MachineUpdate) AddAlertIDs(ids ...int) *MachineUpdate { mu.mutation.AddAlertIDs(ids...) 
@@ -432,6 +444,12 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { if mu.mutation.HubstateCleared() { _spec.ClearField(machine.FieldHubstate, field.TypeJSON) } + if value, ok := mu.mutation.Datasources(); ok { + _spec.SetField(machine.FieldDatasources, field.TypeJSON, value) + } + if mu.mutation.DatasourcesCleared() { + _spec.ClearField(machine.FieldDatasources, field.TypeJSON) + } if mu.mutation.AlertsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, @@ -731,6 +749,18 @@ func (muo *MachineUpdateOne) ClearHubstate() *MachineUpdateOne { return muo } +// SetDatasources sets the "datasources" field. +func (muo *MachineUpdateOne) SetDatasources(m map[string]int64) *MachineUpdateOne { + muo.mutation.SetDatasources(m) + return muo +} + +// ClearDatasources clears the value of the "datasources" field. +func (muo *MachineUpdateOne) ClearDatasources() *MachineUpdateOne { + muo.mutation.ClearDatasources() + return muo +} + // AddAlertIDs adds the "alerts" edge to the Alert entity by IDs. func (muo *MachineUpdateOne) AddAlertIDs(ids ...int) *MachineUpdateOne { muo.mutation.AddAlertIDs(ids...) 
@@ -929,6 +959,12 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e if muo.mutation.HubstateCleared() { _spec.ClearField(machine.FieldHubstate, field.TypeJSON) } + if value, ok := muo.mutation.Datasources(); ok { + _spec.SetField(machine.FieldDatasources, field.TypeJSON, value) + } + if muo.mutation.DatasourcesCleared() { + _spec.ClearField(machine.FieldDatasources, field.TypeJSON) + } if muo.mutation.AlertsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index 9e2827daeef..b1fe52bd6eb 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -211,6 +211,7 @@ var ( {Name: "osversion", Type: field.TypeString, Nullable: true}, {Name: "featureflags", Type: field.TypeString, Nullable: true}, {Name: "hubstate", Type: field.TypeJSON, Nullable: true}, + {Name: "datasources", Type: field.TypeJSON, Nullable: true}, } // MachinesTable holds the schema information for the "machines" table. MachinesTable = &schema.Table{ diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index ab0355f69e6..e10f54234d3 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -6525,6 +6525,7 @@ type MachineMutation struct { osversion *string featureflags *string hubstate **models.HubItems + datasources *map[string]int64 clearedFields map[string]struct{} alerts map[int]struct{} removedalerts map[int]struct{} @@ -7325,6 +7326,55 @@ func (m *MachineMutation) ResetHubstate() { delete(m.clearedFields, machine.FieldHubstate) } +// SetDatasources sets the "datasources" field. +func (m *MachineMutation) SetDatasources(value map[string]int64) { + m.datasources = &value +} + +// Datasources returns the value of the "datasources" field in the mutation. 
+func (m *MachineMutation) Datasources() (r map[string]int64, exists bool) { + v := m.datasources + if v == nil { + return + } + return *v, true +} + +// OldDatasources returns the old "datasources" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MachineMutation) OldDatasources(ctx context.Context) (v map[string]int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDatasources is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDatasources requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDatasources: %w", err) + } + return oldValue.Datasources, nil +} + +// ClearDatasources clears the value of the "datasources" field. +func (m *MachineMutation) ClearDatasources() { + m.datasources = nil + m.clearedFields[machine.FieldDatasources] = struct{}{} +} + +// DatasourcesCleared returns if the "datasources" field was cleared in this mutation. +func (m *MachineMutation) DatasourcesCleared() bool { + _, ok := m.clearedFields[machine.FieldDatasources] + return ok +} + +// ResetDatasources resets all changes to the "datasources" field. +func (m *MachineMutation) ResetDatasources() { + m.datasources = nil + delete(m.clearedFields, machine.FieldDatasources) +} + // AddAlertIDs adds the "alerts" edge to the Alert entity by ids. func (m *MachineMutation) AddAlertIDs(ids ...int) { if m.alerts == nil { @@ -7413,7 +7463,7 @@ func (m *MachineMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *MachineMutation) Fields() []string { - fields := make([]string, 0, 16) + fields := make([]string, 0, 17) if m.created_at != nil { fields = append(fields, machine.FieldCreatedAt) } @@ -7462,6 +7512,9 @@ func (m *MachineMutation) Fields() []string { if m.hubstate != nil { fields = append(fields, machine.FieldHubstate) } + if m.datasources != nil { + fields = append(fields, machine.FieldDatasources) + } return fields } @@ -7502,6 +7555,8 @@ func (m *MachineMutation) Field(name string) (ent.Value, bool) { return m.Featureflags() case machine.FieldHubstate: return m.Hubstate() + case machine.FieldDatasources: + return m.Datasources() } return nil, false } @@ -7543,6 +7598,8 @@ func (m *MachineMutation) OldField(ctx context.Context, name string) (ent.Value, return m.OldFeatureflags(ctx) case machine.FieldHubstate: return m.OldHubstate(ctx) + case machine.FieldDatasources: + return m.OldDatasources(ctx) } return nil, fmt.Errorf("unknown Machine field %s", name) } @@ -7664,6 +7721,13 @@ func (m *MachineMutation) SetField(name string, value ent.Value) error { } m.SetHubstate(v) return nil + case machine.FieldDatasources: + v, ok := value.(map[string]int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDatasources(v) + return nil } return fmt.Errorf("unknown Machine field %s", name) } @@ -7721,6 +7785,9 @@ func (m *MachineMutation) ClearedFields() []string { if m.FieldCleared(machine.FieldHubstate) { fields = append(fields, machine.FieldHubstate) } + if m.FieldCleared(machine.FieldDatasources) { + fields = append(fields, machine.FieldDatasources) + } return fields } @@ -7762,6 +7829,9 @@ func (m *MachineMutation) ClearField(name string) error { case machine.FieldHubstate: m.ClearHubstate() return nil + case machine.FieldDatasources: + m.ClearDatasources() + return nil } return fmt.Errorf("unknown Machine nullable field %s", name) } @@ -7818,6 +7888,9 @@ func (m *MachineMutation) ResetField(name string) error { case 
machine.FieldHubstate: m.ResetHubstate() return nil + case machine.FieldDatasources: + m.ResetDatasources() + return nil } return fmt.Errorf("unknown Machine field %s", name) } diff --git a/pkg/database/ent/schema/machine.go b/pkg/database/ent/schema/machine.go index ae028d345b2..847427b7835 100644 --- a/pkg/database/ent/schema/machine.go +++ b/pkg/database/ent/schema/machine.go @@ -43,6 +43,7 @@ func (Machine) Fields() []ent.Field { field.String("osversion").Optional(), field.String("featureflags").Optional(), field.JSON("hubstate", &models.HubItems{}).Optional(), + field.JSON("datasources", map[string]int64{}).Optional(), } } diff --git a/pkg/database/machines.go b/pkg/database/machines.go index c2ccd037040..55f0a7f21ac 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -18,7 +18,7 @@ import ( const CapiMachineID = types.CAPIOrigin const CapiListsMachineID = types.ListOrigin -func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics *models.BaseMetrics, hubItems *models.HubItems) error { +func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics *models.BaseMetrics, hubItems *models.HubItems, datasources map[string]int64) error { os := baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") @@ -34,6 +34,7 @@ func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics *models. SetFeatureflags(features). SetLastHeartbeat(heartbeat). SetHubstate(hubItems). + SetDatasources(datasources). 
// TODO: update scenarios Save(c.CTX) if err != nil { From 4ab5a53a1cc2ba5d3d3de23530976e2e512f500e Mon Sep 17 00:00:00 2001 From: marco Date: Fri, 28 Jun 2024 14:41:47 +0200 Subject: [PATCH 037/119] update functional test; workaround for openapi verbose error --- pkg/apiserver/controllers/v1/errors.go | 29 ++++++++++ pkg/apiserver/controllers/v1/errors_test.go | 58 ++++++++++++++++++++ pkg/apiserver/controllers/v1/usagemetrics.go | 9 ++- test/bats/30_machines.bats | 55 ++++++++++++++++--- 4 files changed, 142 insertions(+), 9 deletions(-) create mode 100644 pkg/apiserver/controllers/v1/errors_test.go diff --git a/pkg/apiserver/controllers/v1/errors.go b/pkg/apiserver/controllers/v1/errors.go index b85b811f8a7..d9355f66c1f 100644 --- a/pkg/apiserver/controllers/v1/errors.go +++ b/pkg/apiserver/controllers/v1/errors.go @@ -2,6 +2,7 @@ package v1 import ( "net/http" + "strings" "github.com/gin-gonic/gin" "github.com/pkg/errors" @@ -37,3 +38,31 @@ func (c *Controller) HandleDBErrors(gctx *gin.Context, err error) { return } } + +// collapseRepeatedPrefix collapses repeated occurrences of a given prefix in the text +func collapseRepeatedPrefix(text string, prefix string) string { + count := 0 + for strings.HasPrefix(text, prefix) { + count++ + text = strings.TrimPrefix(text, prefix) + } + if count > 0 { + return prefix + text + } + return text +} + + +// RepeatedPrefixError wraps an error and removes the repeating prefix from its message +type RepeatedPrefixError struct { + OriginalError error + Prefix string +} + +func (e RepeatedPrefixError) Error() string { + return collapseRepeatedPrefix(e.OriginalError.Error(), e.Prefix) +} + +func (e RepeatedPrefixError) Unwrap() error { + return e.OriginalError +} diff --git a/pkg/apiserver/controllers/v1/errors_test.go b/pkg/apiserver/controllers/v1/errors_test.go new file mode 100644 index 00000000000..93fa956bd15 --- /dev/null +++ b/pkg/apiserver/controllers/v1/errors_test.go @@ -0,0 +1,58 @@ +package v1 + +import ( + "errors" 
+ + "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCollapseRepeatedPrefix(t *testing.T) { + tests := []struct { + input string + prefix string + want string + }{ + { + input: "aaabbbcccaaa", + prefix: "aaa", + want: "aaabbbcccaaa"}, + { + input: "hellohellohello world", + prefix: "hello", + want: "hello world"}, + { + input: "ababababxyz", + prefix: "ab", + want: "abxyz", + }, + { + input: "xyzxyzxyzxyzxyz", + prefix: "xyz", + want: "xyz"}, + { + input: "123123123456", + prefix: "456", + want: "123123123456", + }, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + assert.Equal(t, tt.want, collapseRepeatedPrefix(tt.input, tt.prefix)) + }) + } +} + +func TestRepeatedPrefixError(t *testing.T) { + originalErr := errors.New("hellohellohello world") + wrappedErr := RepeatedPrefixError{OriginalError: originalErr, Prefix: "hello"} + + want := "hello world" + + assert.Equal(t, want, wrappedErr.Error()) + + assert.Equal(t, originalErr, errors.Unwrap(wrappedErr)) + require.ErrorIs(t, wrappedErr, originalErr) +} diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go index cc0134c4188..8da5c45072f 100644 --- a/pkg/apiserver/controllers/v1/usagemetrics.go +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -42,8 +42,13 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { } if err := input.Validate(strfmt.Default); err != nil { - log.Errorf("Failed to validate usage metrics: %s", err) - c.HandleDBErrors(gctx, err) + // work around a nuisance in the generated code + cleanErr := RepeatedPrefixError{ + OriginalError: err, + Prefix: "validation failure list:\n", + } + log.Errorf("Failed to validate usage metrics: %s", cleanErr) + gctx.JSON(http.StatusUnprocessableEntity, gin.H{"message": cleanErr.Error()}) return } diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index d111fd95559..c6b43666e98 100644 --- 
a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -115,21 +115,59 @@ teardown() { assert_output 'No machines to prune.' } -@test "usage metrics" { +@test "usage metrics (empty payload)" { # a registered log processor can send metrics for the console token=$(lp_login) usage_metrics="http://localhost:8080/v1/usage-metrics" + payload=$(cat <<-EOT + remediation_components: [] + log_processors: [] + EOT + ) + + rune -0 curl -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" + refute_output + refute_stderr +} + +@test "usage metrics (bad payload)" { + token=$(lp_login) + usage_metrics="http://localhost:8080/v1/usage-metrics" + + payload=$(cat <<-EOT + remediation_components: [] + log_processors: + - version: "v1.0" + EOT + ) + + rune -22 curl -f -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" + assert_stderr "curl: (22) The requested URL returned error: 422" + + rune -0 curl -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" + rune -0 jq -r '.message' <(output) + assert_output - <<-EOT + validation failure list: + log_processors.0.utc_startup_timestamp in body is required + log_processors.0.datasources in body is required + log_processors.0.hub_items in body is required + EOT + +} + +@test "usage metrics (full payload)" { + token=$(lp_login) + usage_metrics="http://localhost:8080/v1/usage-metrics" + payload=$(cat <<-EOT remediation_components: [] log_processors: - version: "v1.0" + utc_startup_timestamp: 1707399316 + hub_items: {} feature_flags: - marshmallows - meta: - window_size_seconds: 600 - utc_startup_timestamp: 1707399316 - utc_now_timestamp: 1707485349 os: name: CentOS version: "8" @@ -138,6 +176,10 @@ teardown() { value: 5000 unit: count labels: {} + items: [] + meta: + window_size_seconds: 600 + utc_now_timestamp: 1707485349 console_options: - share_context datasources: @@ -146,7 +188,6 @@ 
teardown() { EOT ) - echo -e "$payload" >/tmp/bbb - rune -0 curl -f -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" + rune -0 curl -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" refute_output } From ea70f258c2b875580150bd2d1968b05d79e5aa98 Mon Sep 17 00:00:00 2001 From: marco Date: Fri, 28 Jun 2024 14:55:17 +0200 Subject: [PATCH 038/119] redundant import alias --- pkg/apiserver/apiserver.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 92637c87974..373a1600ad6 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -21,7 +21,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers" - v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" + "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/database" From 518cd869ca3e761fc6b0974a083ab0de409a4f79 Mon Sep 17 00:00:00 2001 From: marco Date: Fri, 28 Jun 2024 14:55:46 +0200 Subject: [PATCH 039/119] split metrics func tests to own file --- pkg/apiserver/controllers/v1/errors.go | 1 - test/bats/30_machines.bats | 77 ------------------- test/bats/30_machines_metrics.bats | 101 +++++++++++++++++++++++++ 3 files changed, 101 insertions(+), 78 deletions(-) create mode 100644 test/bats/30_machines_metrics.bats diff --git a/pkg/apiserver/controllers/v1/errors.go b/pkg/apiserver/controllers/v1/errors.go index d9355f66c1f..17ce6a4f1c7 100644 --- a/pkg/apiserver/controllers/v1/errors.go +++ b/pkg/apiserver/controllers/v1/errors.go @@ -52,7 +52,6 @@ func collapseRepeatedPrefix(text string, prefix string) string { return text } - // RepeatedPrefixError wraps an error and removes the repeating prefix from 
its message type RepeatedPrefixError struct { OriginalError error diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index c6b43666e98..1d65151b6c8 100644 --- a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -114,80 +114,3 @@ teardown() { rune -0 cscli machines prune assert_output 'No machines to prune.' } - -@test "usage metrics (empty payload)" { - # a registered log processor can send metrics for the console - token=$(lp_login) - usage_metrics="http://localhost:8080/v1/usage-metrics" - - payload=$(cat <<-EOT - remediation_components: [] - log_processors: [] - EOT - ) - - rune -0 curl -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" - refute_output - refute_stderr -} - -@test "usage metrics (bad payload)" { - token=$(lp_login) - usage_metrics="http://localhost:8080/v1/usage-metrics" - - payload=$(cat <<-EOT - remediation_components: [] - log_processors: - - version: "v1.0" - EOT - ) - - rune -22 curl -f -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" - assert_stderr "curl: (22) The requested URL returned error: 422" - - rune -0 curl -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" - rune -0 jq -r '.message' <(output) - assert_output - <<-EOT - validation failure list: - log_processors.0.utc_startup_timestamp in body is required - log_processors.0.datasources in body is required - log_processors.0.hub_items in body is required - EOT - -} - -@test "usage metrics (full payload)" { - token=$(lp_login) - usage_metrics="http://localhost:8080/v1/usage-metrics" - - payload=$(cat <<-EOT - remediation_components: [] - log_processors: - - version: "v1.0" - utc_startup_timestamp: 1707399316 - hub_items: {} - feature_flags: - - marshmallows - os: - name: CentOS - version: "8" - metrics: - - name: logs_parsed - value: 5000 - unit: count - labels: {} - items: [] - meta: - 
window_size_seconds: 600 - utc_now_timestamp: 1707485349 - console_options: - - share_context - datasources: - syslog: 1 - file: 4 - EOT - ) - - rune -0 curl -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" - refute_output -} diff --git a/test/bats/30_machines_metrics.bats b/test/bats/30_machines_metrics.bats new file mode 100644 index 00000000000..22f73587e6b --- /dev/null +++ b/test/bats/30_machines_metrics.bats @@ -0,0 +1,101 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + ./instance-data load + ./instance-crowdsec start +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "usage metrics (empty payload)" { + # a registered log processor can send metrics for the console + token=$(lp_login) + usage_metrics="http://localhost:8080/v1/usage-metrics" + + payload=$(cat <<-EOT + remediation_components: [] + log_processors: [] + EOT + ) + + rune -0 curl -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" + refute_output + refute_stderr +} + +@test "usage metrics (bad payload)" { + token=$(lp_login) + usage_metrics="http://localhost:8080/v1/usage-metrics" + + payload=$(cat <<-EOT + remediation_components: [] + log_processors: + - version: "v1.0" + EOT + ) + + rune -22 curl -f -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" + assert_stderr "curl: (22) The requested URL returned error: 422" + + rune -0 curl -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" + rune -0 jq -r '.message' <(output) + assert_output - <<-EOT + validation failure list: + log_processors.0.utc_startup_timestamp in body is required + log_processors.0.datasources in body is required + 
log_processors.0.hub_items in body is required + EOT + +} + +@test "usage metrics (full payload)" { + token=$(lp_login) + usage_metrics="http://localhost:8080/v1/usage-metrics" + + payload=$(cat <<-EOT + remediation_components: [] + log_processors: + - version: "v1.0" + utc_startup_timestamp: 1707399316 + hub_items: {} + feature_flags: + - marshmallows + os: + name: CentOS + version: "8" + metrics: + - name: logs_parsed + value: 5000 + unit: count + labels: {} + items: [] + meta: + window_size_seconds: 600 + utc_now_timestamp: 1707485349 + console_options: + - share_context + datasources: + syslog: 1 + file: 4 + EOT + ) + + rune -0 curl -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" + refute_output +} From be4dc1d3d329787ff5f2538d160e73137cafec45 Mon Sep 17 00:00:00 2001 From: marco Date: Mon, 1 Jul 2024 13:42:01 +0200 Subject: [PATCH 040/119] wip machines and hub state --- cmd/crowdsec-cli/machines.go | 10 +++++----- pkg/apiserver/apic_metrics.go | 11 +++++++++-- pkg/apiserver/controllers/v1/usagemetrics.go | 4 ++-- pkg/database/bouncers.go | 2 +- pkg/database/machines.go | 13 +++++++++++-- 5 files changed, 28 insertions(+), 12 deletions(-) diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 761aa62037c..6dedcd5693b 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -29,7 +29,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/database/ent" - "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/schema" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -156,7 +156,7 @@ Note: This command requires database direct access, so is intended to be run on return cmd } -func showItems(out io.Writer, itemType string, items map[string]models.HubItem) { +func showItems(out io.Writer, itemType string, items 
map[string]schema.ItemState) { t := newLightTable(out) t.SetHeaders("Name", "Status", "Version") t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) @@ -173,10 +173,10 @@ func showItems(out io.Writer, itemType string, items map[string]models.HubItem) func showHubState(out io.Writer, machines []*ent.Machine) { //FIXME: ugly - items := make(map[string]map[string]models.HubItem) + items := make(map[string]map[string]schema.ItemState) for _, itemType := range cwhub.ItemTypes { - items[itemType] = map[string]models.HubItem{} + items[itemType] = map[string]schema.ItemState{} } for _, machine := range machines { @@ -186,7 +186,7 @@ func showHubState(out io.Writer, machines []*ent.Machine) { continue } - for name, item := range *state { + for name, item := range state { //here, name is type:actual_name //we want to split it to get the type split := strings.Split(name, ":") diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 2c0602def2c..c73ced2510a 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -93,7 +93,6 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { } for _, lp := range lps { - dbMetrics, err := a.dbClient.GetLPUsageMetricsByMachineID(lp.MachineId) if err != nil { log.Errorf("unable to get LP usage metrics: %s", err) @@ -115,7 +114,15 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { lpMetrics.Datasources = lp.Datasources if lp.Hubstate != nil { - lpMetrics.HubItems = *lp.Hubstate + // must carry over the hub state even if nothing is installed + hubItems := models.HubItems{} + for name, item := range lp.Hubstate { + hubItems[name] = models.HubItem{ + Version: item.Version, + Status: item.Status, + } + lpMetrics.HubItems = hubItems + } } lpMetrics.Metrics = make([]*models.DetailedMetrics, 0) diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go index 8da5c45072f..df83aed583c 100644 --- 
a/pkg/apiserver/controllers/v1/usagemetrics.go +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -16,7 +16,7 @@ import ( ) // updateBaseMetrics updates the base metrics for a machine or bouncer -func (c *Controller) updateBaseMetrics(machineID string, bouncer *ent.Bouncer, baseMetrics *models.BaseMetrics, hubItems *models.HubItems, datasources map[string]int64) error { +func (c *Controller) updateBaseMetrics(machineID string, bouncer *ent.Bouncer, baseMetrics models.BaseMetrics, hubItems models.HubItems, datasources map[string]int64) error { switch { case machineID != "": c.DBClient.MachineUpdateBaseMetrics(machineID, baseMetrics, hubItems, datasources) @@ -118,7 +118,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { return } - err := c.updateBaseMetrics(machineID, bouncer, &baseMetrics, &hubItems, datasources) + err := c.updateBaseMetrics(machineID, bouncer, baseMetrics, hubItems, datasources) if err != nil { log.Errorf("Failed to update base metrics: %s", err) c.HandleDBErrors(gctx, err) diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go index 9d230ef99b1..77cddd78152 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -12,7 +12,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) -func (c *Client) BouncerUpdateBaseMetrics(bouncerName string, bouncerType string, baseMetrics *models.BaseMetrics) error { +func (c *Client) BouncerUpdateBaseMetrics(bouncerName string, bouncerType string, baseMetrics models.BaseMetrics) error { os := baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") diff --git a/pkg/database/machines.go b/pkg/database/machines.go index ff4e11921cc..273bdb7d97c 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -11,6 +11,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/schema" "github.com/crowdsecurity/crowdsec/pkg/models" 
"github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -20,13 +21,21 @@ const ( CapiListsMachineID = types.ListOrigin ) -func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics *models.BaseMetrics, hubItems *models.HubItems, datasources map[string]int64) error { +func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics models.BaseMetrics, hubItems models.HubItems, datasources map[string]int64) error { os := baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") //FIXME: nil deref heartbeat := time.Unix(*baseMetrics.Metrics[0].Meta.UtcNowTimestamp, 0) + hubState := map[string]schema.ItemState{} + for name, item := range hubItems { + hubState[name] = schema.ItemState{ + Version: item.Version, + Status: item.Status, + } + } + _, err := c.Ent.Machine. Update(). Where(machine.MachineIdEQ(machineID)). @@ -35,7 +44,7 @@ func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics *models. SetOsversion(*os.Version). SetFeatureflags(features). SetLastHeartbeat(heartbeat). - SetHubstate(hubItems). + SetHubstate(hubState). SetDatasources(datasources). 
// TODO: update scenarios Save(c.CTX) From 141ae11269733b10c4954951a5c50672020a7e37 Mon Sep 17 00:00:00 2001 From: marco Date: Tue, 2 Jul 2024 15:24:13 +0200 Subject: [PATCH 041/119] wip --- cmd/crowdsec-cli/bouncers.go | 9 +- cmd/crowdsec-cli/machines.go | 379 ++++++++++++++++++++++------ cmd/crowdsec-cli/machines_table.go | 44 ---- cmd/crowdsec-cli/support.go | 13 +- go.mod | 2 +- go.sum | 4 +- pkg/database/ent/machine.go | 15 +- pkg/database/ent/machine/machine.go | 8 - pkg/database/ent/machine/where.go | 80 ------ pkg/database/ent/machine_create.go | 18 -- pkg/database/ent/machine_update.go | 52 ---- pkg/database/ent/migrate/schema.go | 1 - pkg/database/ent/mutation.go | 75 +----- pkg/database/ent/runtime.go | 2 +- pkg/database/ent/schema/machine.go | 4 +- pkg/database/metrics.go | 1 + test/bats/30_machines.bats | 7 +- 17 files changed, 327 insertions(+), 387 deletions(-) delete mode 100644 cmd/crowdsec-cli/machines_table.go diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index 3da9575146e..a392ff03c09 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -206,13 +206,14 @@ cscli bouncers add MyBouncerName --key `, return cmd } -func (cli *cliBouncers) deleteValid(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - // need to load config and db because PersistentPreRunE is not called for completions - +// validBouncerID returns a list of bouncer IDs for command completion +func (cli *cliBouncers) validBouncerID(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { var err error cfg := cli.cfg() + // need to load config and db because PersistentPreRunE is not called for completions + if err = require.LAPI(cfg); err != nil { cobra.CompError("unable to list bouncers " + err.Error()) return nil, cobra.ShellCompDirectiveNoFileComp @@ -261,7 +262,7 @@ func (cli *cliBouncers) newDeleteCmd() *cobra.Command { Args: cobra.MinimumNArgs(1), 
Aliases: []string{"remove"}, DisableAutoGenTag: true, - ValidArgsFunction: cli.deleteValid, + ValidArgsFunction: cli.validBouncerID, RunE: func(_ *cobra.Command, args []string) error { return cli.delete(args) }, diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index ab758630195..932ed50a72b 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -14,23 +14,25 @@ import ( "time" "github.com/AlecAivazis/survey/v2" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" "github.com/fatih/color" "github.com/go-openapi/strfmt" "github.com/google/uuid" + "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/go-cs-lib/maptools" "github.com/crowdsecurity/machineid" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/database/ent" - "github.com/crowdsecurity/crowdsec/pkg/database/ent/schema" + "github.com/crowdsecurity/crowdsec/pkg/emoji" "github.com/crowdsecurity/crowdsec/pkg/types" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" ) const passwordLength = 64 @@ -156,107 +158,163 @@ Note: This command requires database direct access, so is intended to be run on return cmd } -func showItems(out io.Writer, itemType string, items map[string]schema.ItemState) { - t := newLightTable(out) - t.SetHeaders("Name", "Status", "Version") - t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) - - for name, item := range items { - t.AddRow(name, item.Status, item.Version) +func splitFQName(fqName string) (string, string) { + parts := strings.Split(fqName, ":") + if len(parts) != 2 { + return "", "" } - - renderTableTitle(out, strings.ToUpper(itemType)) - 
t.Render() + return parts[0], parts[1] } -func showHubState(out io.Writer, machines []*ent.Machine) { - - //FIXME: ugly - items := make(map[string]map[string]schema.ItemState) +func (*cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { + state := machine.Hubstate - for _, itemType := range cwhub.ItemTypes { - items[itemType] = map[string]schema.ItemState{} + if len(state) == 0 { + fmt.Println("No hub items found for this machine") + return } - for _, machine := range machines { - state := machine.Hubstate + // group state rows by type for multiple tables + rowsByType := make(map[string][]table.Row) - if state == nil { + // sort by type + name + fqNames := maptools.SortedKeysNoCase(state) + + for _, fqName := range fqNames { + item := state[fqName] + + // here, name is type:actual_name + // we want to split it to get the type + itemType, itemName := splitFQName(fqName) + if itemType == "" { + log.Warningf("invalid hub item name '%s'", fqName) continue } - for name, item := range state { - //here, name is type:actual_name - //we want to split it to get the type - split := strings.Split(name, ":") - if len(split) != 2 { - log.Warningf("invalid hub item name '%s'", name) - continue - } - items[split[0]][split[1]] = item - //items[split[0]] = append(items[split[0]], item) + if _, ok := rowsByType[itemType]; !ok { + rowsByType[itemType] = make([]table.Row, 0) } + + row := table.Row{itemName, item.Status, item.Version} + rowsByType[itemType] = append(rowsByType[itemType], row) } - for _, t := range cwhub.ItemTypes { - showItems(out, t, items[t]) + for itemType, rows := range rowsByType { + t := newTable(out).Writer + t.AppendHeader(table.Row{"Name", "Status", "Version"}) + t.SetTitle(itemType) + t.AppendRows(rows) + fmt.Fprintln(out, t.Render()) } } -func (cli *cliMachines) machinesShow(machines []*ent.Machine, showHub bool) error { - //FIXME: should showHub be used for json/raw output ? 
- out := color.Output +func (cli *cliMachines) listHuman(out io.Writer, machines []*ent.Machine) { + t := newLightTable(out).Writer + t.AppendHeader(table.Row{"Name", "IP Address", "Last Update", "Status", "Version", "OS", "Auth Type", "Last Heartbeat"}) - switch cli.cfg().Cscli.Output { - case "human": - getAgentsTable(out, machines) - if showHub { - showHubState(out, machines) + for _, m := range machines { + validated := emoji.Prohibited + if m.IsValidated { + validated = emoji.CheckMark } - case "json": - enc := json.NewEncoder(out) - enc.SetIndent("", " ") - if err := enc.Encode(machines); err != nil { - return errors.New("failed to marshal") + hb, active := getLastHeartbeat(m) + if !active { + hb = emoji.Warning + " " + hb } - return nil - case "raw": - csvwriter := csv.NewWriter(out) + t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, fmt.Sprintf("%s/%s", m.Osname, m.Osversion), m.AuthType, hb}) + } - err := csvwriter.Write([]string{"machine_id", "ip_address", "updated_at", "validated", "version", "auth_type", "last_heartbeat", "os", "feature_flags"}) - if err != nil { - return fmt.Errorf("failed to write header: %w", err) - } + fmt.Fprintln(out, t.Render()) +} - for _, m := range machines { - validated := "false" - if m.IsValidated { - validated = "true" - } +func (cli *cliMachines) listJSON(out io.Writer, machines []*ent.Machine) error { + // only the info we want, no hub status, scenarios, edges, etc. 
+ type machineInfo struct { + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` + LastPush *time.Time `json:"last_push,omitempty"` + LastHeartbeat *time.Time `json:"last_heartbeat,omitempty"` + MachineId string `json:"machineId,omitempty"` + IpAddress string `json:"ipAddress,omitempty"` + Version string `json:"version,omitempty"` + IsValidated bool `json:"isValidated,omitempty"` + AuthType string `json:"auth_type"` + OS string `json:"os,omitempty"` + Featureflags []string `json:"featureflags,omitempty"` + Datasources map[string]int64 `json:"datasources,omitempty"` + } + + info := make([]machineInfo, 0, len(machines)) + for _, m := range machines { + info = append(info, machineInfo{ + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + LastPush: m.LastPush, + LastHeartbeat: m.LastHeartbeat, + MachineId: m.MachineId, + IpAddress: m.IpAddress, + Version: m.Version, + IsValidated: m.IsValidated, + AuthType: m.AuthType, + OS: m.Osname + "/" + m.Osversion, + Featureflags: strings.Split(m.Featureflags, ","), + Datasources: m.Datasources, + }) + } + + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(info); err != nil { + return errors.New("failed to marshal") + } - hb, _ := getLastHeartbeat(m) + return nil +} - if err := csvwriter.Write([]string{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb, fmt.Sprintf("%s/%s", m.Osname, m.Osversion), m.Featureflags}); err != nil { - return fmt.Errorf("failed to write raw output: %w", err) - } +func (cli *cliMachines) listCSV(out io.Writer, machines []*ent.Machine) error { + csvwriter := csv.NewWriter(out) + + err := csvwriter.Write([]string{"machine_id", "ip_address", "updated_at", "validated", "version", "auth_type", "last_heartbeat", "os"}) + if err != nil { + return fmt.Errorf("failed to write header: %w", err) + } + + for _, m := range machines { + validated := "false" + if m.IsValidated { + validated = 
"true" } - csvwriter.Flush() + hb, _ := getLastHeartbeat(m) + + if err := csvwriter.Write([]string{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb, fmt.Sprintf("%s/%s", m.Osname, m.Osversion)}); err != nil { + return fmt.Errorf("failed to write raw output: %w", err) + } } + + csvwriter.Flush() return nil } -func (cli *cliMachines) list() error { +func (cli *cliMachines) list(out io.Writer) error { machines, err := cli.db.ListMachines() if err != nil { return fmt.Errorf("unable to list machines: %w", err) } - return cli.machinesShow(machines, false) + switch cli.cfg().Cscli.Output { + case "human": + cli.listHuman(out, machines) + case "json": + return cli.listJSON(out, machines) + case "raw": + return cli.listCSV(out, machines) + } + return nil } func (cli *cliMachines) newListCmd() *cobra.Command { @@ -268,7 +326,7 @@ func (cli *cliMachines) newListCmd() *cobra.Command { Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.list() + return cli.list(color.Output) }, } @@ -410,13 +468,14 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri return nil } -func (cli *cliMachines) deleteValid(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - // need to load config and db because PersistentPreRunE is not called for completions - +// validMachineID returns a list of machine IDs for command completion +func (cli *cliMachines) validMachineID(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { var err error cfg := cli.cfg() + // need to load config and db because PersistentPreRunE is not called for completions + if err = require.LAPI(cfg); err != nil { cobra.CompError("unable to list machines " + err.Error()) return nil, cobra.ShellCompDirectiveNoFileComp @@ -466,7 +525,7 @@ func (cli *cliMachines) newDeleteCmd() *cobra.Command { Args: cobra.MinimumNArgs(1), 
Aliases: []string{"remove"}, DisableAutoGenTag: true, - ValidArgsFunction: cli.deleteValid, + ValidArgsFunction: cli.validMachineID, RunE: func(_ *cobra.Command, args []string) error { return cli.delete(args) }, @@ -503,7 +562,7 @@ func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force b return nil } - getAgentsTable(color.Output, machines) + cli.listHuman(color.Output, machines) if !force { if yes, err := askYesNo( @@ -521,7 +580,7 @@ func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force b return fmt.Errorf("unable to prune machines: %w", err) } - fmt.Fprintf(os.Stderr, "successfully delete %d machines\n", deleted) + fmt.Fprintf(os.Stderr, "successfully deleted %d machines\n", deleted) return nil } @@ -583,13 +642,162 @@ func (cli *cliMachines) newValidateCmd() *cobra.Command { return cmd } -func (cli *cliMachines) inspect(machineID string, showHub bool) error { - machine, err := cli.db.QueryMachineByID(machineID) - if err != nil { - return fmt.Errorf("unable to get machine '%s': %w", machineID, err) +func (*cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { + t := newTable(out).Writer + + t.SetTitle("Machine: " + machine.MachineId) + + t.SetColumnConfigs([]table.ColumnConfig{ + {Number: 1, AutoMerge: true}, + }) + + t.AppendRows([]table.Row{ + {"IP Address", machine.IpAddress}, + {"Created At", machine.CreatedAt}, + {"Last Update", machine.UpdatedAt}, + {"Last Heartbeat", machine.LastHeartbeat}, + {"Validated?", machine.IsValidated}, + {"CrowdSec version", machine.Version}, + {"OS", machine.Osname + "/" + machine.Osversion}, + {"Auth type", machine.AuthType}, + }) + + for dsName, dsCount := range machine.Datasources { + t.AppendRow(table.Row{"Datasources", fmt.Sprintf("%s: %d", dsName, dsCount)}) + } + + for _, ff := range strings.Split(machine.Featureflags, ",") { + t.AppendRow(table.Row{"Feature Flags", ff}) + } + + for _, fqName := range maptools.SortedKeysNoCase(machine.Hubstate) { + itemType, 
itemName := splitFQName(fqName) + if itemType != cwhub.COLLECTIONS { + continue + } + + t.AppendRow(table.Row{"Collections", itemName}) } - return cli.machinesShow([]*ent.Machine{machine}, showHub) + fmt.Fprintln(out, t.Render()) +} + +func (*cliMachines) inspectJSON(out io.Writer, machine *ent.Machine) error { +// type view struct { +// MachineID string `json:"machineId"` +// Name string `json:"name"` +// LocalVersion string `json:"local_version"` +// LocalPath string `json:"local_path"` +// Description string `json:"description"` +// UTF8Status string `json:"utf8_status"` +// Status string `json:"status"` +// } +// +// hubStatus := make(map[string][]itemHubStatus) +// for _, itemType := range itemTypes { +// // empty slice in case there are no items of this type +// hubStatus[itemType] = make([]itemHubStatus, len(items[itemType])) +// +// for i, item := range items[itemType] { +// status := item.State.Text() +// statusEmo := item.State.Emoji() +// hubStatus[itemType][i] = itemHubStatus{ +// Name: item.Name, +// LocalVersion: item.State.LocalVersion, +// LocalPath: item.State.LocalPath, +// Description: item.Description, +// Status: status, +// UTF8Status: fmt.Sprintf("%v %s", statusEmo, status), +// } +// } +// } +// +// x, err := json.MarshalIndent(hubStatus, "", " ") +// if err != nil { +// return fmt.Errorf("failed to unmarshal: %w", err) +// } +// +// out.Write(x) +// +// +// +// enc := json.NewEncoder(out) +// enc.SetIndent("", " ") +// +// if err := enc.Encode(machineView); err != nil { +// return errors.New("failed to marshal") +// } +// + return nil +} + +func (cli *cliMachines) inspect(machine *ent.Machine) error { + out := color.Output + + switch cli.cfg().Cscli.Output { + case "human": + cli.inspectHuman(out, machine) + case "json": + if err := cli.inspectJSON(out, machine); err != nil { + return err + } + case "raw": + // TODO + } + return nil +} + +func (cli *cliMachines) inspectHub(machine *ent.Machine) error { + out := color.Output + + switch 
cli.cfg().Cscli.Output { + case "human": + cli.inspectHubHuman(out, machine) + case "json": + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(machine.Hubstate); err != nil { + return errors.New("failed to marshal") + } + + return nil + case "raw": + csvwriter := csv.NewWriter(out) + + err := csvwriter.Write([]string{"type", "name", "status", "version"}) + if err != nil { + return fmt.Errorf("failed to write header: %w", err) + } + + rows := make([][]string, 0) + + state := machine.Hubstate + + // sort by type + name + fqNames := maptools.SortedKeys(state) + + for _, fqName := range fqNames { + itemType, itemName := splitFQName(fqName) + if itemType == "" { + log.Warningf("invalid hub item name '%s'", fqName) + continue + } + + item := state[fqName] + + rows = append(rows, []string{itemType, itemName, item.Status, item.Version}) + } + + for _, row := range rows { + if err := csvwriter.Write(row); err != nil { + return fmt.Errorf("failed to write raw output: %w", err) + } + } + + csvwriter.Flush() + } + return nil } func (cli *cliMachines) newInspectCmd() *cobra.Command { @@ -601,8 +809,19 @@ func (cli *cliMachines) newInspectCmd() *cobra.Command { Example: `cscli machines inspect "machine1"`, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, + ValidArgsFunction: cli.validMachineID, RunE: func(_ *cobra.Command, args []string) error { - return cli.inspect(args[0], showHub) + machineID := args[0] + machine, err := cli.db.QueryMachineByID(machineID) + if err != nil { + return fmt.Errorf("unable to read machine data '%s': %w", machineID, err) + } + + if showHub { + return cli.inspectHub(machine) + } + + return cli.inspect(machine) }, } diff --git a/cmd/crowdsec-cli/machines_table.go b/cmd/crowdsec-cli/machines_table.go deleted file mode 100644 index 360fb51c79c..00000000000 --- a/cmd/crowdsec-cli/machines_table.go +++ /dev/null @@ -1,44 +0,0 @@ -package main - -import ( - "fmt" - "io" - "time" - - 
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" - "github.com/crowdsecurity/crowdsec/pkg/database/ent" - "github.com/crowdsecurity/crowdsec/pkg/emoji" - "github.com/jedib0t/go-pretty/v6/text" -) - -var tableHeaders = []string{"Name", "IP Address", "Last Update", "Status", "Version", "OS", "Auth Type", "Feature Flags", "Last Heartbeat"} - -func getAgentsTable(out io.Writer, machines []*ent.Machine) { - t := newLightTable(out) - t.SetHeaders(tableHeaders...) - - alignment := []text.Align{} - - for i := 0; i < len(tableHeaders); i++ { - alignment = append(alignment, table.AlignLeft) - } - - t.SetHeaderAlignment(alignment...) - t.SetAlignment(alignment...) - - for _, m := range machines { - validated := emoji.Prohibited - if m.IsValidated { - validated = emoji.CheckMark - } - - hb, active := getLastHeartbeat(m) - if !active { - hb = emoji.Warning + " " + hb - } - - t.AddRow(m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, fmt.Sprintf("%s/%s", m.Osname, m.Osversion), m.AuthType, m.Featureflags, hb) - } - - t.Render() -} diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 061733ef8d3..eb206b657b5 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -216,12 +216,9 @@ func (cli *cliSupport) dumpAgents(zw *zip.Writer, db *database.Client) error { out := new(bytes.Buffer) - machines, err := db.ListMachines() - if err != nil { - return fmt.Errorf("unable to list machines: %w", err) - } - - getAgentsTable(out, machines) + // call the "cscli machines list" command directly, skip any preRun + cm := cliMachines{db: db, cfg: cli.cfg} + cm.list(out) stripped := stripAnsiString(out.String()) @@ -617,6 +614,10 @@ cscli support dump -f /tmp/crowdsec-support.zip Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { + output := cli.cfg().Cscli.Output + if output != "human" { + return fmt.Errorf("output format %s not supported for this command", output) 
+ } return cli.dump(cmd.Context(), outFile) }, } diff --git a/go.mod b/go.mod index 1bc63a470df..ac4e262b282 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/corazawaf/libinjection-go v0.1.2 github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 - github.com/crowdsecurity/go-cs-lib v0.0.11 + github.com/crowdsecurity/go-cs-lib v0.0.12 github.com/crowdsecurity/grokky v0.2.1 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index ba4e6267bb9..25edf22b767 100644 --- a/go.sum +++ b/go.sum @@ -105,8 +105,8 @@ github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= -github.com/crowdsecurity/go-cs-lib v0.0.11 h1:ygUOKrkMLaJ2wjC020LgtY6XDkToNFK4NmYlhpkk5ko= -github.com/crowdsecurity/go-cs-lib v0.0.11/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k= +github.com/crowdsecurity/go-cs-lib v0.0.12 h1:yca1exzeqPc1vrZc2TZcljOID85lnUr9HAyPBR7xJUM= +github.com/crowdsecurity/go-cs-lib v0.0.12/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k= github.com/crowdsecurity/grokky v0.2.1 h1:t4VYnDlAd0RjDM2SlILalbwfCrQxtJSMGdQOR0zwkE4= github.com/crowdsecurity/grokky v0.2.1/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= diff --git a/pkg/database/ent/machine.go b/pkg/database/ent/machine.go index fddb2e6a8b3..15cd077431b 100644 --- a/pkg/database/ent/machine.go +++ b/pkg/database/ent/machine.go @@ -39,8 +39,6 @@ type Machine struct { Version string 
`json:"version,omitempty"` // IsValidated holds the value of the "isValidated" field. IsValidated bool `json:"isValidated,omitempty"` - // Status holds the value of the "status" field. - Status string `json:"status,omitempty"` // AuthType holds the value of the "auth_type" field. AuthType string `json:"auth_type"` // Osname holds the value of the "osname" field. @@ -62,7 +60,7 @@ type Machine struct { // MachineEdges holds the relations/edges for other nodes in the graph. type MachineEdges struct { // Alerts holds the value of the alerts edge. - Alerts []*Alert `json:"alerts,omitempty"` + Alerts []*Alert `json:"dafuck"` // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [1]bool @@ -88,7 +86,7 @@ func (*Machine) scanValues(columns []string) ([]any, error) { values[i] = new(sql.NullBool) case machine.FieldID: values[i] = new(sql.NullInt64) - case machine.FieldMachineId, machine.FieldPassword, machine.FieldIpAddress, machine.FieldScenarios, machine.FieldVersion, machine.FieldStatus, machine.FieldAuthType, machine.FieldOsname, machine.FieldOsversion, machine.FieldFeatureflags: + case machine.FieldMachineId, machine.FieldPassword, machine.FieldIpAddress, machine.FieldScenarios, machine.FieldVersion, machine.FieldAuthType, machine.FieldOsname, machine.FieldOsversion, machine.FieldFeatureflags: values[i] = new(sql.NullString) case machine.FieldCreatedAt, machine.FieldUpdatedAt, machine.FieldLastPush, machine.FieldLastHeartbeat: values[i] = new(sql.NullTime) @@ -175,12 +173,6 @@ func (m *Machine) assignValues(columns []string, values []any) error { } else if value.Valid { m.IsValidated = value.Bool } - case machine.FieldStatus: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field status", values[i]) - } else if value.Valid { - m.Status = value.String - } case machine.FieldAuthType: if value, ok := values[i].(*sql.NullString); !ok { return 
fmt.Errorf("unexpected type %T for field auth_type", values[i]) @@ -295,9 +287,6 @@ func (m *Machine) String() string { builder.WriteString("isValidated=") builder.WriteString(fmt.Sprintf("%v", m.IsValidated)) builder.WriteString(", ") - builder.WriteString("status=") - builder.WriteString(m.Status) - builder.WriteString(", ") builder.WriteString("auth_type=") builder.WriteString(m.AuthType) builder.WriteString(", ") diff --git a/pkg/database/ent/machine/machine.go b/pkg/database/ent/machine/machine.go index 179059edd4d..009e6e19c35 100644 --- a/pkg/database/ent/machine/machine.go +++ b/pkg/database/ent/machine/machine.go @@ -34,8 +34,6 @@ const ( FieldVersion = "version" // FieldIsValidated holds the string denoting the isvalidated field in the database. FieldIsValidated = "is_validated" - // FieldStatus holds the string denoting the status field in the database. - FieldStatus = "status" // FieldAuthType holds the string denoting the auth_type field in the database. FieldAuthType = "auth_type" // FieldOsname holds the string denoting the osname field in the database. @@ -74,7 +72,6 @@ var Columns = []string{ FieldScenarios, FieldVersion, FieldIsValidated, - FieldStatus, FieldAuthType, FieldOsname, FieldOsversion, @@ -168,11 +165,6 @@ func ByIsValidated(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldIsValidated, opts...).ToFunc() } -// ByStatus orders the results by the status field. -func ByStatus(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldStatus, opts...).ToFunc() -} - // ByAuthType orders the results by the auth_type field. 
func ByAuthType(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldAuthType, opts...).ToFunc() diff --git a/pkg/database/ent/machine/where.go b/pkg/database/ent/machine/where.go index aca66135f5c..de523510f33 100644 --- a/pkg/database/ent/machine/where.go +++ b/pkg/database/ent/machine/where.go @@ -105,11 +105,6 @@ func IsValidated(v bool) predicate.Machine { return predicate.Machine(sql.FieldEQ(FieldIsValidated, v)) } -// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. -func Status(v string) predicate.Machine { - return predicate.Machine(sql.FieldEQ(FieldStatus, v)) -} - // AuthType applies equality check predicate on the "auth_type" field. It's identical to AuthTypeEQ. func AuthType(v string) predicate.Machine { return predicate.Machine(sql.FieldEQ(FieldAuthType, v)) @@ -665,81 +660,6 @@ func IsValidatedNEQ(v bool) predicate.Machine { return predicate.Machine(sql.FieldNEQ(FieldIsValidated, v)) } -// StatusEQ applies the EQ predicate on the "status" field. -func StatusEQ(v string) predicate.Machine { - return predicate.Machine(sql.FieldEQ(FieldStatus, v)) -} - -// StatusNEQ applies the NEQ predicate on the "status" field. -func StatusNEQ(v string) predicate.Machine { - return predicate.Machine(sql.FieldNEQ(FieldStatus, v)) -} - -// StatusIn applies the In predicate on the "status" field. -func StatusIn(vs ...string) predicate.Machine { - return predicate.Machine(sql.FieldIn(FieldStatus, vs...)) -} - -// StatusNotIn applies the NotIn predicate on the "status" field. -func StatusNotIn(vs ...string) predicate.Machine { - return predicate.Machine(sql.FieldNotIn(FieldStatus, vs...)) -} - -// StatusGT applies the GT predicate on the "status" field. -func StatusGT(v string) predicate.Machine { - return predicate.Machine(sql.FieldGT(FieldStatus, v)) -} - -// StatusGTE applies the GTE predicate on the "status" field. 
-func StatusGTE(v string) predicate.Machine { - return predicate.Machine(sql.FieldGTE(FieldStatus, v)) -} - -// StatusLT applies the LT predicate on the "status" field. -func StatusLT(v string) predicate.Machine { - return predicate.Machine(sql.FieldLT(FieldStatus, v)) -} - -// StatusLTE applies the LTE predicate on the "status" field. -func StatusLTE(v string) predicate.Machine { - return predicate.Machine(sql.FieldLTE(FieldStatus, v)) -} - -// StatusContains applies the Contains predicate on the "status" field. -func StatusContains(v string) predicate.Machine { - return predicate.Machine(sql.FieldContains(FieldStatus, v)) -} - -// StatusHasPrefix applies the HasPrefix predicate on the "status" field. -func StatusHasPrefix(v string) predicate.Machine { - return predicate.Machine(sql.FieldHasPrefix(FieldStatus, v)) -} - -// StatusHasSuffix applies the HasSuffix predicate on the "status" field. -func StatusHasSuffix(v string) predicate.Machine { - return predicate.Machine(sql.FieldHasSuffix(FieldStatus, v)) -} - -// StatusIsNil applies the IsNil predicate on the "status" field. -func StatusIsNil() predicate.Machine { - return predicate.Machine(sql.FieldIsNull(FieldStatus)) -} - -// StatusNotNil applies the NotNil predicate on the "status" field. -func StatusNotNil() predicate.Machine { - return predicate.Machine(sql.FieldNotNull(FieldStatus)) -} - -// StatusEqualFold applies the EqualFold predicate on the "status" field. -func StatusEqualFold(v string) predicate.Machine { - return predicate.Machine(sql.FieldEqualFold(FieldStatus, v)) -} - -// StatusContainsFold applies the ContainsFold predicate on the "status" field. -func StatusContainsFold(v string) predicate.Machine { - return predicate.Machine(sql.FieldContainsFold(FieldStatus, v)) -} - // AuthTypeEQ applies the EQ predicate on the "auth_type" field. 
func AuthTypeEQ(v string) predicate.Machine { return predicate.Machine(sql.FieldEQ(FieldAuthType, v)) diff --git a/pkg/database/ent/machine_create.go b/pkg/database/ent/machine_create.go index 4ae0e5a9d1f..a68f7a23966 100644 --- a/pkg/database/ent/machine_create.go +++ b/pkg/database/ent/machine_create.go @@ -138,20 +138,6 @@ func (mc *MachineCreate) SetNillableIsValidated(b *bool) *MachineCreate { return mc } -// SetStatus sets the "status" field. -func (mc *MachineCreate) SetStatus(s string) *MachineCreate { - mc.mutation.SetStatus(s) - return mc -} - -// SetNillableStatus sets the "status" field if the given value is not nil. -func (mc *MachineCreate) SetNillableStatus(s *string) *MachineCreate { - if s != nil { - mc.SetStatus(*s) - } - return mc -} - // SetAuthType sets the "auth_type" field. func (mc *MachineCreate) SetAuthType(s string) *MachineCreate { mc.mutation.SetAuthType(s) @@ -386,10 +372,6 @@ func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) { _spec.SetField(machine.FieldIsValidated, field.TypeBool, value) _node.IsValidated = value } - if value, ok := mc.mutation.Status(); ok { - _spec.SetField(machine.FieldStatus, field.TypeString, value) - _node.Status = value - } if value, ok := mc.mutation.AuthType(); ok { _spec.SetField(machine.FieldAuthType, field.TypeString, value) _node.AuthType = value diff --git a/pkg/database/ent/machine_update.go b/pkg/database/ent/machine_update.go index aa0f02542c1..c9a4f0b72ff 100644 --- a/pkg/database/ent/machine_update.go +++ b/pkg/database/ent/machine_update.go @@ -158,26 +158,6 @@ func (mu *MachineUpdate) SetNillableIsValidated(b *bool) *MachineUpdate { return mu } -// SetStatus sets the "status" field. -func (mu *MachineUpdate) SetStatus(s string) *MachineUpdate { - mu.mutation.SetStatus(s) - return mu -} - -// SetNillableStatus sets the "status" field if the given value is not nil. 
-func (mu *MachineUpdate) SetNillableStatus(s *string) *MachineUpdate { - if s != nil { - mu.SetStatus(*s) - } - return mu -} - -// ClearStatus clears the value of the "status" field. -func (mu *MachineUpdate) ClearStatus() *MachineUpdate { - mu.mutation.ClearStatus() - return mu -} - // SetAuthType sets the "auth_type" field. func (mu *MachineUpdate) SetAuthType(s string) *MachineUpdate { mu.mutation.SetAuthType(s) @@ -411,12 +391,6 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := mu.mutation.IsValidated(); ok { _spec.SetField(machine.FieldIsValidated, field.TypeBool, value) } - if value, ok := mu.mutation.Status(); ok { - _spec.SetField(machine.FieldStatus, field.TypeString, value) - } - if mu.mutation.StatusCleared() { - _spec.ClearField(machine.FieldStatus, field.TypeString) - } if value, ok := mu.mutation.AuthType(); ok { _spec.SetField(machine.FieldAuthType, field.TypeString, value) } @@ -643,26 +617,6 @@ func (muo *MachineUpdateOne) SetNillableIsValidated(b *bool) *MachineUpdateOne { return muo } -// SetStatus sets the "status" field. -func (muo *MachineUpdateOne) SetStatus(s string) *MachineUpdateOne { - muo.mutation.SetStatus(s) - return muo -} - -// SetNillableStatus sets the "status" field if the given value is not nil. -func (muo *MachineUpdateOne) SetNillableStatus(s *string) *MachineUpdateOne { - if s != nil { - muo.SetStatus(*s) - } - return muo -} - -// ClearStatus clears the value of the "status" field. -func (muo *MachineUpdateOne) ClearStatus() *MachineUpdateOne { - muo.mutation.ClearStatus() - return muo -} - // SetAuthType sets the "auth_type" field. 
func (muo *MachineUpdateOne) SetAuthType(s string) *MachineUpdateOne { muo.mutation.SetAuthType(s) @@ -926,12 +880,6 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e if value, ok := muo.mutation.IsValidated(); ok { _spec.SetField(machine.FieldIsValidated, field.TypeBool, value) } - if value, ok := muo.mutation.Status(); ok { - _spec.SetField(machine.FieldStatus, field.TypeString, value) - } - if muo.mutation.StatusCleared() { - _spec.ClearField(machine.FieldStatus, field.TypeString) - } if value, ok := muo.mutation.AuthType(); ok { _spec.SetField(machine.FieldAuthType, field.TypeString, value) } diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index 5c32c472403..5b436830192 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -205,7 +205,6 @@ var ( {Name: "scenarios", Type: field.TypeString, Nullable: true, Size: 100000}, {Name: "version", Type: field.TypeString, Nullable: true}, {Name: "is_validated", Type: field.TypeBool, Default: false}, - {Name: "status", Type: field.TypeString, Nullable: true}, {Name: "auth_type", Type: field.TypeString, Default: "password"}, {Name: "osname", Type: field.TypeString, Nullable: true}, {Name: "osversion", Type: field.TypeString, Nullable: true}, diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index 8d109ece379..045ecb3c9af 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -6538,7 +6538,6 @@ type MachineMutation struct { scenarios *string version *string isValidated *bool - status *string auth_type *string osname *string osversion *string @@ -7064,55 +7063,6 @@ func (m *MachineMutation) ResetIsValidated() { m.isValidated = nil } -// SetStatus sets the "status" field. -func (m *MachineMutation) SetStatus(s string) { - m.status = &s -} - -// Status returns the value of the "status" field in the mutation. 
-func (m *MachineMutation) Status() (r string, exists bool) { - v := m.status - if v == nil { - return - } - return *v, true -} - -// OldStatus returns the old "status" field's value of the Machine entity. -// If the Machine object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *MachineMutation) OldStatus(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldStatus is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldStatus requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldStatus: %w", err) - } - return oldValue.Status, nil -} - -// ClearStatus clears the value of the "status" field. -func (m *MachineMutation) ClearStatus() { - m.status = nil - m.clearedFields[machine.FieldStatus] = struct{}{} -} - -// StatusCleared returns if the "status" field was cleared in this mutation. -func (m *MachineMutation) StatusCleared() bool { - _, ok := m.clearedFields[machine.FieldStatus] - return ok -} - -// ResetStatus resets all changes to the "status" field. -func (m *MachineMutation) ResetStatus() { - m.status = nil - delete(m.clearedFields, machine.FieldStatus) -} - // SetAuthType sets the "auth_type" field. func (m *MachineMutation) SetAuthType(s string) { m.auth_type = &s @@ -7482,7 +7432,7 @@ func (m *MachineMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *MachineMutation) Fields() []string { - fields := make([]string, 0, 17) + fields := make([]string, 0, 16) if m.created_at != nil { fields = append(fields, machine.FieldCreatedAt) } @@ -7513,9 +7463,6 @@ func (m *MachineMutation) Fields() []string { if m.isValidated != nil { fields = append(fields, machine.FieldIsValidated) } - if m.status != nil { - fields = append(fields, machine.FieldStatus) - } if m.auth_type != nil { fields = append(fields, machine.FieldAuthType) } @@ -7562,8 +7509,6 @@ func (m *MachineMutation) Field(name string) (ent.Value, bool) { return m.Version() case machine.FieldIsValidated: return m.IsValidated() - case machine.FieldStatus: - return m.Status() case machine.FieldAuthType: return m.AuthType() case machine.FieldOsname: @@ -7605,8 +7550,6 @@ func (m *MachineMutation) OldField(ctx context.Context, name string) (ent.Value, return m.OldVersion(ctx) case machine.FieldIsValidated: return m.OldIsValidated(ctx) - case machine.FieldStatus: - return m.OldStatus(ctx) case machine.FieldAuthType: return m.OldAuthType(ctx) case machine.FieldOsname: @@ -7698,13 +7641,6 @@ func (m *MachineMutation) SetField(name string, value ent.Value) error { } m.SetIsValidated(v) return nil - case machine.FieldStatus: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetStatus(v) - return nil case machine.FieldAuthType: v, ok := value.(string) if !ok { @@ -7789,9 +7725,6 @@ func (m *MachineMutation) ClearedFields() []string { if m.FieldCleared(machine.FieldVersion) { fields = append(fields, machine.FieldVersion) } - if m.FieldCleared(machine.FieldStatus) { - fields = append(fields, machine.FieldStatus) - } if m.FieldCleared(machine.FieldOsname) { fields = append(fields, machine.FieldOsname) } @@ -7833,9 +7766,6 @@ func (m *MachineMutation) ClearField(name string) error { case machine.FieldVersion: m.ClearVersion() return nil - case machine.FieldStatus: - m.ClearStatus() - return nil case 
machine.FieldOsname: m.ClearOsname() return nil @@ -7889,9 +7819,6 @@ func (m *MachineMutation) ResetField(name string) error { case machine.FieldIsValidated: m.ResetIsValidated() return nil - case machine.FieldStatus: - m.ResetStatus() - return nil case machine.FieldAuthType: m.ResetAuthType() return nil diff --git a/pkg/database/ent/runtime.go b/pkg/database/ent/runtime.go index 8d50d916029..15413490633 100644 --- a/pkg/database/ent/runtime.go +++ b/pkg/database/ent/runtime.go @@ -151,7 +151,7 @@ func init() { // machine.DefaultIsValidated holds the default value on creation for the isValidated field. machine.DefaultIsValidated = machineDescIsValidated.Default.(bool) // machineDescAuthType is the schema descriptor for auth_type field. - machineDescAuthType := machineFields[11].Descriptor() + machineDescAuthType := machineFields[10].Descriptor() // machine.DefaultAuthType holds the default value on creation for the auth_type field. machine.DefaultAuthType = machineDescAuthType.Default.(string) metaFields := schema.Meta{}.Fields() diff --git a/pkg/database/ent/schema/machine.go b/pkg/database/ent/schema/machine.go index 6fdcea2d824..53160b15e12 100644 --- a/pkg/database/ent/schema/machine.go +++ b/pkg/database/ent/schema/machine.go @@ -42,7 +42,6 @@ func (Machine) Fields() []ent.Field { field.String("version").Optional(), field.Bool("isValidated"). Default(false), - field.String("status").Optional(), field.String("auth_type").Default(types.PasswordAuthType).StructTag(`json:"auth_type"`), field.String("osname").Optional(), field.String("osversion").Optional(), @@ -55,6 +54,7 @@ func (Machine) Fields() []ent.Field { // Edges of the Machine. func (Machine) Edges() []ent.Edge { return []ent.Edge{ - edge.To("alerts", Alert.Type), + edge.To("alerts", Alert.Type). 
+ StructTag(`json:"dafuck"`), } } diff --git a/pkg/database/metrics.go b/pkg/database/metrics.go index ae3138891f0..b850ea0be07 100644 --- a/pkg/database/metrics.go +++ b/pkg/database/metrics.go @@ -107,6 +107,7 @@ func (c *Client) GetBouncerUsageMetricsByName(bouncerName string) ([]*ent.Metric func (c *Client) MarkUsageMetricsAsSent(ids []int) error { _, err := c.Ent.Metric.Update(). Where(metric.IDIn(ids...)). + // XXX: no utc? SetPushedAt(time.Now()). Save(c.CTX) if err != nil { diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index 1af5e97dcb4..f8b63fb3173 100644 --- a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -62,7 +62,7 @@ teardown() { assert_output 1 } -@test "machines delete has autocompletion" { +@test "machines [delete|inspect] has autocompletion" { rune -0 cscli machines add -a -f /dev/null foo1 rune -0 cscli machines add -a -f /dev/null foo2 rune -0 cscli machines add -a -f /dev/null bar @@ -72,6 +72,11 @@ teardown() { assert_line --index 1 'foo2' refute_line 'bar' refute_line 'baz' + rune -0 cscli __complete machines inspect 'foo' + assert_line --index 0 'foo1' + assert_line --index 1 'foo2' + refute_line 'bar' + refute_line 'baz' } @test "heartbeat is initially null" { From e18791a25281993b78b2bd7929aac9d35eff4bb0 Mon Sep 17 00:00:00 2001 From: marco Date: Tue, 2 Jul 2024 23:34:44 +0200 Subject: [PATCH 042/119] update for hub state change --- cmd/crowdsec-cli/machines.go | 60 ++++++------------------------ cmd/crowdsec/lpmetrics.go | 8 ++-- pkg/apiserver/apic_metrics.go | 12 ++++-- pkg/database/ent/schema/machine.go | 1 + pkg/database/machines.go | 14 ++++--- 5 files changed, 35 insertions(+), 60 deletions(-) diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 932ed50a72b..aa0b2dd796d 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -22,7 +22,6 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" - "github.com/crowdsecurity/go-cs-lib/maptools" 
"github.com/crowdsecurity/machineid" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -158,14 +157,6 @@ Note: This command requires database direct access, so is intended to be run on return cmd } -func splitFQName(fqName string) (string, string) { - parts := strings.Split(fqName, ":") - if len(parts) != 2 { - return "", "" - } - return parts[0], parts[1] -} - func (*cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { state := machine.Hubstate @@ -177,26 +168,15 @@ func (*cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { // group state rows by type for multiple tables rowsByType := make(map[string][]table.Row) - // sort by type + name - fqNames := maptools.SortedKeysNoCase(state) - - for _, fqName := range fqNames { - item := state[fqName] - - // here, name is type:actual_name - // we want to split it to get the type - itemType, itemName := splitFQName(fqName) - if itemType == "" { - log.Warningf("invalid hub item name '%s'", fqName) - continue - } + for itemType, items := range state { + for _, item := range items { + if _, ok := rowsByType[itemType]; !ok { + rowsByType[itemType] = make([]table.Row, 0) + } - if _, ok := rowsByType[itemType]; !ok { - rowsByType[itemType] = make([]table.Row, 0) + row := table.Row{item.Name, item.Status, item.Version} + rowsByType[itemType] = append(rowsByType[itemType], row) } - - row := table.Row{itemName, item.Status, item.Version} - rowsByType[itemType] = append(rowsByType[itemType], row) } for itemType, rows := range rowsByType { @@ -670,13 +650,8 @@ func (*cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { t.AppendRow(table.Row{"Feature Flags", ff}) } - for _, fqName := range maptools.SortedKeysNoCase(machine.Hubstate) { - itemType, itemName := splitFQName(fqName) - if itemType != cwhub.COLLECTIONS { - continue - } - - t.AppendRow(table.Row{"Collections", itemName}) + for _, coll := range machine.Hubstate[cwhub.COLLECTIONS] { + t.AppendRow(table.Row{"Collections", coll.Name}) } 
fmt.Fprintln(out, t.Render()) @@ -772,21 +747,10 @@ func (cli *cliMachines) inspectHub(machine *ent.Machine) error { rows := make([][]string, 0) - state := machine.Hubstate - - // sort by type + name - fqNames := maptools.SortedKeys(state) - - for _, fqName := range fqNames { - itemType, itemName := splitFQName(fqName) - if itemType == "" { - log.Warningf("invalid hub item name '%s'", fqName) - continue + for itemType, items := range machine.Hubstate { + for _, item := range items { + rows = append(rows, []string{itemType, item.Name, item.Status, item.Version}) } - - item := state[fqName] - - rows = append(rows, []string{itemType, itemName, item.Status, item.Version}) } for _, row := range rows { diff --git a/cmd/crowdsec/lpmetrics.go b/cmd/crowdsec/lpmetrics.go index 5b650932bbf..b92c0cc4603 100644 --- a/cmd/crowdsec/lpmetrics.go +++ b/cmd/crowdsec/lpmetrics.go @@ -44,6 +44,7 @@ func getHubState(hub *cwhub.Hub) models.HubItems { ret := models.HubItems{} for _, itemType := range cwhub.ItemTypes { + ret[itemType] = []models.HubItem{} items, _ := hub.GetInstalledItemsByType(itemType) for _, item := range items { status := "official" @@ -53,10 +54,11 @@ func getHubState(hub *cwhub.Hub) models.HubItems { if item.State.Tainted { status = "tainted" } - ret[item.FQName()] = models.HubItem{ - Version: item.Version, + ret[itemType] = append(ret[itemType], models.HubItem{ + Name: item.Name, Status: status, - } + Version: item.Version, + }) } } diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index c73ced2510a..019ddf982bb 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -116,10 +116,14 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { if lp.Hubstate != nil { // must carry over the hub state even if nothing is installed hubItems := models.HubItems{} - for name, item := range lp.Hubstate { - hubItems[name] = models.HubItem{ - Version: item.Version, - Status: item.Status, + for itemType, items := 
range lp.Hubstate { + hubItems[itemType] = []models.HubItem{} + for _, item := range items { + hubItems[itemType] = append(hubItems[itemType], models.HubItem{ + Name: item.Name, + Status: item.Status, + Version: item.Version, + }) } lpMetrics.HubItems = hubItems } diff --git a/pkg/database/ent/schema/machine.go b/pkg/database/ent/schema/machine.go index 8e907c0ee48..f5445d99248 100644 --- a/pkg/database/ent/schema/machine.go +++ b/pkg/database/ent/schema/machine.go @@ -10,6 +10,7 @@ import ( // ItemState is defined here instead of using pkg/models/HubItem to avoid introducing a dependency type ItemState struct { + Name string `json:"name,omitempty"` Status string `json:"status,omitempty"` Version string `json:"version,omitempty"` } diff --git a/pkg/database/machines.go b/pkg/database/machines.go index 273bdb7d97c..8570609cb97 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -28,11 +28,15 @@ func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics models.B //FIXME: nil deref heartbeat := time.Unix(*baseMetrics.Metrics[0].Meta.UtcNowTimestamp, 0) - hubState := map[string]schema.ItemState{} - for name, item := range hubItems { - hubState[name] = schema.ItemState{ - Version: item.Version, - Status: item.Status, + hubState := map[string][]schema.ItemState{} + for itemType, items := range hubItems { + hubState[itemType] = []schema.ItemState{} + for _, item := range items { + hubState[itemType] = append(hubState[itemType], schema.ItemState{ + Name: item.Name, + Status: item.Status, + Version: item.Version, + }) } } From b1dd9fd2b966e117507f59eae932a1bc1fe43678 Mon Sep 17 00:00:00 2001 From: marco Date: Wed, 3 Jul 2024 10:46:28 +0200 Subject: [PATCH 043/119] inspect -o json --- cmd/crowdsec-cli/machines.go | 149 +++++++++++++---------------------- 1 file changed, 53 insertions(+), 96 deletions(-) diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index aa0b2dd796d..d6de8f1222a 100644 --- 
a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -209,49 +209,37 @@ func (cli *cliMachines) listHuman(out io.Writer, machines []*ent.Machine) { fmt.Fprintln(out, t.Render()) } -func (cli *cliMachines) listJSON(out io.Writer, machines []*ent.Machine) error { - // only the info we want, no hub status, scenarios, edges, etc. - type machineInfo struct { - CreatedAt time.Time `json:"created_at,omitempty"` - UpdatedAt time.Time `json:"updated_at,omitempty"` - LastPush *time.Time `json:"last_push,omitempty"` - LastHeartbeat *time.Time `json:"last_heartbeat,omitempty"` - MachineId string `json:"machineId,omitempty"` - IpAddress string `json:"ipAddress,omitempty"` - Version string `json:"version,omitempty"` - IsValidated bool `json:"isValidated,omitempty"` - AuthType string `json:"auth_type"` - OS string `json:"os,omitempty"` - Featureflags []string `json:"featureflags,omitempty"` - Datasources map[string]int64 `json:"datasources,omitempty"` - } - - info := make([]machineInfo, 0, len(machines)) - for _, m := range machines { - info = append(info, machineInfo{ - CreatedAt: m.CreatedAt, - UpdatedAt: m.UpdatedAt, - LastPush: m.LastPush, - LastHeartbeat: m.LastHeartbeat, - MachineId: m.MachineId, - IpAddress: m.IpAddress, - Version: m.Version, - IsValidated: m.IsValidated, - AuthType: m.AuthType, - OS: m.Osname + "/" + m.Osversion, - Featureflags: strings.Split(m.Featureflags, ","), - Datasources: m.Datasources, - }) - } - - enc := json.NewEncoder(out) - enc.SetIndent("", " ") +// machineInfo contains only the data we want for inspect/list: no hub status, scenarios, edges, etc. 
+type machineInfo struct { + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` + LastPush *time.Time `json:"last_push,omitempty"` + LastHeartbeat *time.Time `json:"last_heartbeat,omitempty"` + MachineId string `json:"machineId,omitempty"` + IpAddress string `json:"ipAddress,omitempty"` + Version string `json:"version,omitempty"` + IsValidated bool `json:"isValidated,omitempty"` + AuthType string `json:"auth_type"` + OS string `json:"os,omitempty"` + Featureflags []string `json:"featureflags,omitempty"` + Datasources map[string]int64 `json:"datasources,omitempty"` +} - if err := enc.Encode(info); err != nil { - return errors.New("failed to marshal") +func newMachineInfo(m *ent.Machine) machineInfo { + return machineInfo{ + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + LastPush: m.LastPush, + LastHeartbeat: m.LastHeartbeat, + MachineId: m.MachineId, + IpAddress: m.IpAddress, + Version: m.Version, + IsValidated: m.IsValidated, + AuthType: m.AuthType, + OS: m.Osname + "/" + m.Osversion, + Featureflags: strings.Split(m.Featureflags, ","), + Datasources: m.Datasources, } - - return nil } func (cli *cliMachines) listCSV(out io.Writer, machines []*ent.Machine) error { @@ -290,7 +278,19 @@ func (cli *cliMachines) list(out io.Writer) error { case "human": cli.listHuman(out, machines) case "json": - return cli.listJSON(out, machines) + info := make([]machineInfo, 0, len(machines)) + for _, m := range machines { + info = append(info, newMachineInfo(m)) + } + + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(info); err != nil { + return errors.New("failed to marshal") + } + + return nil case "raw": return cli.listCSV(out, machines) } @@ -657,67 +657,24 @@ func (*cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { fmt.Fprintln(out, t.Render()) } -func (*cliMachines) inspectJSON(out io.Writer, machine *ent.Machine) error { -// type view struct { -// MachineID string 
`json:"machineId"` -// Name string `json:"name"` -// LocalVersion string `json:"local_version"` -// LocalPath string `json:"local_path"` -// Description string `json:"description"` -// UTF8Status string `json:"utf8_status"` -// Status string `json:"status"` -// } -// -// hubStatus := make(map[string][]itemHubStatus) -// for _, itemType := range itemTypes { -// // empty slice in case there are no items of this type -// hubStatus[itemType] = make([]itemHubStatus, len(items[itemType])) -// -// for i, item := range items[itemType] { -// status := item.State.Text() -// statusEmo := item.State.Emoji() -// hubStatus[itemType][i] = itemHubStatus{ -// Name: item.Name, -// LocalVersion: item.State.LocalVersion, -// LocalPath: item.State.LocalPath, -// Description: item.Description, -// Status: status, -// UTF8Status: fmt.Sprintf("%v %s", statusEmo, status), -// } -// } -// } -// -// x, err := json.MarshalIndent(hubStatus, "", " ") -// if err != nil { -// return fmt.Errorf("failed to unmarshal: %w", err) -// } -// -// out.Write(x) -// -// -// -// enc := json.NewEncoder(out) -// enc.SetIndent("", " ") -// -// if err := enc.Encode(machineView); err != nil { -// return errors.New("failed to marshal") -// } -// - return nil -} - func (cli *cliMachines) inspect(machine *ent.Machine) error { out := color.Output + outputFormat := cli.cfg().Cscli.Output - switch cli.cfg().Cscli.Output { + switch outputFormat { case "human": cli.inspectHuman(out, machine) case "json": - if err := cli.inspectJSON(out, machine); err != nil { - return err + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(newMachineInfo(machine)); err != nil { + return errors.New("failed to marshal") } - case "raw": - // TODO + + return nil + default: + return fmt.Errorf("output format '%s' not supported", outputFormat) } return nil } From a470989d567a6b311865fe5bf452288d97f61f09 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Wed, 3 Jul 2024 10:50:40 +0200 Subject: [PATCH 044/119] 
validate metrics body based on auth type --- pkg/apiserver/controllers/controller.go | 4 +- pkg/apiserver/controllers/v1/usagemetrics.go | 55 +++++++++++++++----- 2 files changed, 43 insertions(+), 16 deletions(-) diff --git a/pkg/apiserver/controllers/controller.go b/pkg/apiserver/controllers/controller.go index 5f9ef3f3142..441dccbe912 100644 --- a/pkg/apiserver/controllers/controller.go +++ b/pkg/apiserver/controllers/controller.go @@ -9,7 +9,7 @@ import ( "github.com/gin-gonic/gin" log "github.com/sirupsen/logrus" - "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1" + v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/database" @@ -66,7 +66,7 @@ func eitherAuthMiddleware(jwtMiddleware gin.HandlerFunc, apiKeyMiddleware gin.Ha apiKeyMiddleware(c) } else { jwtMiddleware(c) - } + } } } diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go index df83aed583c..1c8d291b325 100644 --- a/pkg/apiserver/controllers/v1/usagemetrics.go +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -33,10 +33,12 @@ func (c *Controller) updateBaseMetrics(machineID string, bouncer *ent.Bouncer, b func (c *Controller) UsageMetrics(gctx *gin.Context) { var input models.AllMetrics + logger := log.WithField("func", "UsageMetrics") + // parse the payload if err := gctx.ShouldBindJSON(&input); err != nil { - log.Errorf("Failed to bind json: %s", err) + logger.Errorf("Failed to bind json: %s", err) gctx.JSON(http.StatusBadRequest, gin.H{"message": err.Error()}) return } @@ -45,15 +47,13 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { // work around a nuisance in the generated code cleanErr := RepeatedPrefixError{ OriginalError: err, - Prefix: "validation failure list:\n", + Prefix: "validation failure list:\n", } - log.Errorf("Failed to validate usage metrics: %s", 
cleanErr) + logger.Errorf("Failed to validate usage metrics: %s", cleanErr) gctx.JSON(http.StatusUnprocessableEntity, gin.H{"message": cleanErr.Error()}) return } - // TODO: validate payload with the right type, depending on auth context - var ( generatedType metric.GeneratedType generatedBy string @@ -62,19 +62,23 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { bouncer, _ := getBouncerFromContext(gctx) if bouncer != nil { - log.Tracef("Received usage metris for bouncer: %s", bouncer.Name) + logger.Tracef("Received usage metris for bouncer: %s", bouncer.Name) generatedType = metric.GeneratedTypeRC generatedBy = bouncer.Name } machineID, _ := getMachineIDFromContext(gctx) if machineID != "" { - log.Tracef("Received usage metrics for log processor: %s", machineID) + logger.Tracef("Received usage metrics for log processor: %s", machineID) generatedType = metric.GeneratedTypeLP generatedBy = machineID } - // TODO: if both or none are set, which error should we return? + if machineID != "" && bouncer != nil { + logger.Errorf("Payload has both machineID and bouncer") + gctx.JSON(http.StatusBadRequest, gin.H{"message": "Payload has both LP and RC data"}) + return + } var ( payload map[string]any @@ -85,11 +89,22 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { switch len(input.LogProcessors) { case 0: - break + if machineID != "" { + logger.Errorf("Missing log processor data") + gctx.JSON(http.StatusBadRequest, gin.H{"message": "Missing log processor data"}) + return + } case 1: // the final slice can't have more than one item, // guaranteed by the swagger schema item0 := input.LogProcessors[0] + + err := item0.Validate(strfmt.Default) + if err != nil { + logger.Errorf("Failed to validate log processor data: %s", err) + gctx.JSON(http.StatusUnprocessableEntity, gin.H{"message": err.Error()}) + return + } payload = map[string]any{ "metrics": item0.Metrics, } @@ -97,7 +112,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { hubItems = 
item0.HubItems datasources = item0.Datasources default: - log.Errorf("Payload has more than one log processor") + logger.Errorf("Payload has more than one log processor") // this is not checked in the swagger schema gctx.JSON(http.StatusBadRequest, gin.H{"message": "Payload has more than one log processor"}) return @@ -105,9 +120,21 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { switch len(input.RemediationComponents) { case 0: - break + if bouncer != nil { + logger.Errorf("Missing remediation component data") + gctx.JSON(http.StatusBadRequest, gin.H{"message": "Missing remediation component data"}) + return + } case 1: item0 := input.RemediationComponents[0] + + err := item0.Validate(strfmt.Default) + if err != nil { + logger.Errorf("Failed to validate remediation component data: %s", err) + gctx.JSON(http.StatusUnprocessableEntity, gin.H{"message": err.Error()}) + return + } + payload = map[string]any{ "type": item0.Type, "metrics": item0.Metrics, @@ -120,7 +147,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { err := c.updateBaseMetrics(machineID, bouncer, baseMetrics, hubItems, datasources) if err != nil { - log.Errorf("Failed to update base metrics: %s", err) + logger.Errorf("Failed to update base metrics: %s", err) c.HandleDBErrors(gctx, err) return } @@ -134,13 +161,13 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { jsonPayload, err := json.Marshal(payload) if err != nil { - log.Errorf("Failed to marshal usage metrics: %s", err) + logger.Errorf("Failed to marshal usage metrics: %s", err) c.HandleDBErrors(gctx, err) return } if _, err := c.DBClient.CreateMetric(generatedType, generatedBy, collectedAt, string(jsonPayload)); err != nil { - log.Error(err) + logger.Error(err) c.HandleDBErrors(gctx, err) return } From 4e2a3f53c9fab4fb8bdc9b034a66539d196b8d10 Mon Sep 17 00:00:00 2001 From: marco Date: Wed, 3 Jul 2024 11:10:34 +0200 Subject: [PATCH 045/119] machines list -o raw: show timestamp instead of delta for last_heartbeat --- 
cmd/crowdsec-cli/machines.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index d6de8f1222a..cdeec5c36f1 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -256,7 +256,10 @@ func (cli *cliMachines) listCSV(out io.Writer, machines []*ent.Machine) error { validated = "true" } - hb, _ := getLastHeartbeat(m) + hb := "-" + if m.LastHeartbeat != nil { + hb = m.LastHeartbeat.Format(time.RFC3339) + } if err := csvwriter.Write([]string{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb, fmt.Sprintf("%s/%s", m.Osname, m.Osversion)}); err != nil { return fmt.Errorf("failed to write raw output: %w", err) @@ -674,7 +677,7 @@ func (cli *cliMachines) inspect(machine *ent.Machine) error { return nil default: - return fmt.Errorf("output format '%s' not supported", outputFormat) + return fmt.Errorf("output format '%s' not supported for this command", outputFormat) } return nil } From 18cc51d039db637ebb3bec66fc1dbbdf1e303972 Mon Sep 17 00:00:00 2001 From: marco Date: Wed, 3 Jul 2024 11:33:41 +0200 Subject: [PATCH 046/119] merge back bouncers_table.go to list command --- cmd/crowdsec-cli/bouncers.go | 33 +++++++++++++++++++++++++----- cmd/crowdsec-cli/bouncers_table.go | 33 ------------------------------ cmd/crowdsec-cli/support.go | 9 +++----- 3 files changed, 31 insertions(+), 44 deletions(-) delete mode 100644 cmd/crowdsec-cli/bouncers_table.go diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index a392ff03c09..1b6485fe602 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "io" "os" "slices" "strings" @@ -12,12 +13,15 @@ import ( "github.com/AlecAivazis/survey/v2" "github.com/fatih/color" + "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" 
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/emoji" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -83,9 +87,28 @@ Note: This command requires database direct access, so is intended to be run on return cmd } -func (cli *cliBouncers) list() error { - out := color.Output +func (cli *cliBouncers) listHuman(out io.Writer, bouncers []*ent.Bouncer) { + t := newLightTable(out).Writer + t.AppendHeader(table.Row{"Name", "IP Address", "Valid", "Last API pull", "Type", "Version", "Auth Type"}) + for _, b := range bouncers { + revoked := emoji.CheckMark + if b.Revoked { + revoked = emoji.Prohibited + } + + lastPull := "" + if b.LastPull != nil { + lastPull = b.LastPull.Format(time.RFC3339) + } + + t.AppendRow(table.Row{b.Name, b.IPAddress, revoked, lastPull, b.Type, b.Version, b.AuthType}) + } + + fmt.Fprintln(out, t.Render()) +} + +func (cli *cliBouncers) list(out io.Writer) error { bouncers, err := cli.db.ListBouncers() if err != nil { return fmt.Errorf("unable to list bouncers: %w", err) @@ -93,7 +116,7 @@ func (cli *cliBouncers) list() error { switch cli.cfg().Cscli.Output { case "human": - getBouncersTable(out, bouncers) + cli.listHuman(out, bouncers) case "json": enc := json.NewEncoder(out) enc.SetIndent("", " ") @@ -140,7 +163,7 @@ func (cli *cliBouncers) newListCmd() *cobra.Command { Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.list() + return cli.list(color.Output) }, } @@ -293,7 +316,7 @@ func (cli *cliBouncers) prune(duration time.Duration, force bool) error { return nil } - getBouncersTable(color.Output, bouncers) + cli.listHuman(color.Output, bouncers) if !force { if yes, err := askYesNo( diff --git a/cmd/crowdsec-cli/bouncers_table.go 
b/cmd/crowdsec-cli/bouncers_table.go deleted file mode 100644 index c32762ba266..00000000000 --- a/cmd/crowdsec-cli/bouncers_table.go +++ /dev/null @@ -1,33 +0,0 @@ -package main - -import ( - "io" - "time" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" - "github.com/crowdsecurity/crowdsec/pkg/database/ent" - "github.com/crowdsecurity/crowdsec/pkg/emoji" -) - -func getBouncersTable(out io.Writer, bouncers []*ent.Bouncer) { - t := newLightTable(out) - t.SetHeaders("Name", "IP Address", "Valid", "Last API pull", "Type", "Version", "Auth Type") - t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) - - for _, b := range bouncers { - revoked := emoji.CheckMark - if b.Revoked { - revoked = emoji.Prohibited - } - - lastPull := "" - if b.LastPull != nil { - lastPull = b.LastPull.Format(time.RFC3339) - } - - t.AddRow(b.Name, b.IPAddress, revoked, lastPull, b.Type, b.Version, b.AuthType) - } - - t.Render() -} diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index eb206b657b5..1f98768f778 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -193,12 +193,9 @@ func (cli *cliSupport) dumpBouncers(zw *zip.Writer, db *database.Client) error { out := new(bytes.Buffer) - bouncers, err := db.ListBouncers() - if err != nil { - return fmt.Errorf("unable to list bouncers: %w", err) - } - - getBouncersTable(out, bouncers) + // call the "cscli bouncers list" command directly, skip any preRun + cm := cliBouncers{db: db, cfg: cli.cfg} + cm.list(out) stripped := stripAnsiString(out.String()) From 0f1e38a3dc09cfb9977e3e391ad6d5b8ad671dad Mon Sep 17 00:00:00 2001 From: marco Date: Wed, 3 Jul 2024 12:20:11 +0200 Subject: [PATCH 047/119] func test empty payload --- test/bats/30_machines_metrics.bats | 6 +++++- 1 file changed, 5 insertions(+), 1 
deletion(-) diff --git a/test/bats/30_machines_metrics.bats b/test/bats/30_machines_metrics.bats index 22f73587e6b..e29e2175037 100644 --- a/test/bats/30_machines_metrics.bats +++ b/test/bats/30_machines_metrics.bats @@ -34,8 +34,12 @@ teardown() { EOT ) - rune -0 curl -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" + rune -22 curl -f -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" refute_output + assert_stderr 'curl: (22) The requested URL returned error: 400' + + rune -0 curl -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" + assert_json '{message: "Missing log processor data"}' refute_stderr } From 5298236d2707a3ae0e85fe77fa19a122612ecc24 Mon Sep 17 00:00:00 2001 From: marco Date: Wed, 3 Jul 2024 14:04:31 +0200 Subject: [PATCH 048/119] cscli bouncers inspect --- cmd/crowdsec-cli/bouncers.go | 173 +++++++++++++++++++++++++++++------ cmd/crowdsec-cli/machines.go | 10 +- pkg/database/ent/helpers.go | 17 ++++ 3 files changed, 169 insertions(+), 31 deletions(-) create mode 100644 pkg/database/ent/helpers.go diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index 1b6485fe602..f87d702a364 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -21,6 +21,7 @@ import ( middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" "github.com/crowdsecurity/crowdsec/pkg/emoji" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -83,11 +84,12 @@ Note: This command requires database direct access, so is intended to be run on cmd.AddCommand(cli.newAddCmd()) cmd.AddCommand(cli.newDeleteCmd()) cmd.AddCommand(cli.newPruneCmd()) + cmd.AddCommand(cli.newInspectCmd()) return cmd } -func (cli 
*cliBouncers) listHuman(out io.Writer, bouncers []*ent.Bouncer) { +func (cli *cliBouncers) listHuman(out io.Writer, bouncers ent.Bouncers) { t := newLightTable(out).Writer t.AppendHeader(table.Row{"Name", "IP Address", "Valid", "Last API pull", "Type", "Version", "Auth Type"}) @@ -108,6 +110,65 @@ func (cli *cliBouncers) listHuman(out io.Writer, bouncers []*ent.Bouncer) { fmt.Fprintln(out, t.Render()) } +// bouncerInfo contains only the data we want for inspect/list +type bouncerInfo struct { + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Name string `json:"name"` + Revoked bool `json:"revoked"` + IPAddress string `json:"ip_address"` + Type string `json:"type"` + Version string `json:"version"` + LastPull *time.Time `json:"last_pull"` + AuthType string `json:"auth_type"` + OS string `json:"os,omitempty"` + Featureflags []string `json:"featureflags,omitempty"` +} + +func newBouncerInfo(b *ent.Bouncer) bouncerInfo { + return bouncerInfo{ + CreatedAt: b.CreatedAt, + UpdatedAt: b.UpdatedAt, + Name: b.Name, + Revoked: b.Revoked, + IPAddress: b.IPAddress, + Type: b.Type, + Version: b.Version, + LastPull: b.LastPull, + AuthType: b.AuthType, + OS: b.GetOSNameAndVersion(), + // XXX: Featureflags: strings.Split(b.FeatureFlags, ","), + } +} + +func (cli *cliBouncers) listCSV(out io.Writer, bouncers ent.Bouncers) error { + csvwriter := csv.NewWriter(out) + + if err := csvwriter.Write([]string{"name", "ip", "revoked", "last_pull", "type", "version", "auth_type"}); err != nil { + return fmt.Errorf("failed to write raw header: %w", err) + } + + for _, b := range bouncers { + valid := "validated" + if b.Revoked { + valid = "pending" + } + + lastPull := "" + if b.LastPull != nil { + lastPull = b.LastPull.Format(time.RFC3339) + } + + if err := csvwriter.Write([]string{b.Name, b.IPAddress, valid, lastPull, b.Type, b.Version, b.AuthType}); err != nil { + return fmt.Errorf("failed to write raw: %w", err) + } + } + + csvwriter.Flush() + return nil 
+} + + func (cli *cliBouncers) list(out io.Writer) error { bouncers, err := cli.db.ListBouncers() if err != nil { @@ -118,38 +179,21 @@ func (cli *cliBouncers) list(out io.Writer) error { case "human": cli.listHuman(out, bouncers) case "json": + info := make([]bouncerInfo, 0, len(bouncers)) + for _, b := range bouncers { + info = append(info, newBouncerInfo(b)) + } + enc := json.NewEncoder(out) enc.SetIndent("", " ") - if err := enc.Encode(bouncers); err != nil { - return fmt.Errorf("failed to marshal: %w", err) + if err := enc.Encode(info); err != nil { + return errors.New("failed to marshal") } return nil case "raw": - csvwriter := csv.NewWriter(out) - - if err := csvwriter.Write([]string{"name", "ip", "revoked", "last_pull", "type", "version", "auth_type"}); err != nil { - return fmt.Errorf("failed to write raw header: %w", err) - } - - for _, b := range bouncers { - valid := "validated" - if b.Revoked { - valid = "pending" - } - - lastPull := "" - if b.LastPull != nil { - lastPull = b.LastPull.Format(time.RFC3339) - } - - if err := csvwriter.Write([]string{b.Name, b.IPAddress, valid, lastPull, b.Type, b.Version, b.AuthType}); err != nil { - return fmt.Errorf("failed to write raw: %w", err) - } - } - - csvwriter.Flush() + return cli.listCSV(out, bouncers) } return nil @@ -365,3 +409,80 @@ cscli bouncers prune -d 45m --force`, return cmd } + +func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { + t := newTable(out).Writer + + t.SetTitle("Bouncer: " + bouncer.Name) + + t.SetColumnConfigs([]table.ColumnConfig{ + {Number: 1, AutoMerge: true}, + }) + + t.AppendRows([]table.Row{ + {"Created At", bouncer.CreatedAt}, + {"Last Update", bouncer.UpdatedAt}, + {"Revoked?", bouncer.Revoked}, + {"IP Address", bouncer.IPAddress}, + {"Type", bouncer.Type}, + {"Version", bouncer.Version}, + {"Last Pull", bouncer.LastPull}, + {"Auth type", bouncer.AuthType}, + {"OS", bouncer.GetOSNameAndVersion()}, + // XXX: Featureflags: strings.Split(b.FeatureFlags, ","), 
+ }) + + // for _, ff := range strings.Split(bouncer.Featureflags, ",") { + // t.AppendRow(table.Row{"Feature Flags", ff}) + // } + + fmt.Fprintln(out, t.Render()) +} + +func (cli *cliBouncers) inspect(bouncer *ent.Bouncer) error { + out := color.Output + outputFormat := cli.cfg().Cscli.Output + + switch outputFormat { + case "human": + cli.inspectHuman(out, bouncer) + case "json": + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(newBouncerInfo(bouncer)); err != nil { + return errors.New("failed to marshal") + } + + return nil + default: + return fmt.Errorf("output format '%s' not supported for this command", outputFormat) + } + return nil +} + + +func (cli *cliBouncers) newInspectCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "inspect [bouncer_name]", + Short: "inspect a bouncer by name", + Example: `cscli bouncers inspect "bouncer1"`, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + ValidArgsFunction: cli.validBouncerID, + RunE: func(cmd *cobra.Command, args []string) error { + bouncerName := args[0] + + b, err := cli.db.Ent.Bouncer.Query(). + Where(bouncer.Name(bouncerName)). 
+ Only(cmd.Context()) + if err != nil { + return fmt.Errorf("unable to read bouncer data '%s': %w", bouncerName, err) + } + + return cli.inspect(b) + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index cdeec5c36f1..8fccfa40ad7 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -188,7 +188,7 @@ func (*cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { } } -func (cli *cliMachines) listHuman(out io.Writer, machines []*ent.Machine) { +func (cli *cliMachines) listHuman(out io.Writer, machines ent.Machines) { t := newLightTable(out).Writer t.AppendHeader(table.Row{"Name", "IP Address", "Last Update", "Status", "Version", "OS", "Auth Type", "Last Heartbeat"}) @@ -203,7 +203,7 @@ func (cli *cliMachines) listHuman(out io.Writer, machines []*ent.Machine) { hb = emoji.Warning + " " + hb } - t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, fmt.Sprintf("%s/%s", m.Osname, m.Osversion), m.AuthType, hb}) + t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.GetOSNameAndVersion(), m.AuthType, hb}) } fmt.Fprintln(out, t.Render()) @@ -236,13 +236,13 @@ func newMachineInfo(m *ent.Machine) machineInfo { Version: m.Version, IsValidated: m.IsValidated, AuthType: m.AuthType, - OS: m.Osname + "/" + m.Osversion, + OS: m.GetOSNameAndVersion(), Featureflags: strings.Split(m.Featureflags, ","), Datasources: m.Datasources, } } -func (cli *cliMachines) listCSV(out io.Writer, machines []*ent.Machine) error { +func (cli *cliMachines) listCSV(out io.Writer, machines ent.Machines) error { csvwriter := csv.NewWriter(out) err := csvwriter.Write([]string{"machine_id", "ip_address", "updated_at", "validated", "version", "auth_type", "last_heartbeat", "os"}) @@ -641,7 +641,7 @@ func (*cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { {"Last Heartbeat", machine.LastHeartbeat}, 
{"Validated?", machine.IsValidated}, {"CrowdSec version", machine.Version}, - {"OS", machine.Osname + "/" + machine.Osversion}, + {"OS", machine.GetOSNameAndVersion()}, {"Auth type", machine.AuthType}, }) diff --git a/pkg/database/ent/helpers.go b/pkg/database/ent/helpers.go new file mode 100644 index 00000000000..5260e760f0a --- /dev/null +++ b/pkg/database/ent/helpers.go @@ -0,0 +1,17 @@ +package ent + +func (m *Machine) GetOSNameAndVersion() string { + ret := m.Osname + if m.Osversion != "" { + ret += "/" + m.Osversion + } + return ret +} + +func (b *Bouncer) GetOSNameAndVersion() string { + ret := b.Osname + if b.Osversion != "" { + ret += "/" + b.Osversion + } + return ret +} From 06ff89198f2435f9d9b851844f3fb3363bd31890 Mon Sep 17 00:00:00 2001 From: marco Date: Wed, 3 Jul 2024 14:20:42 +0200 Subject: [PATCH 049/119] bouncer feature flags; handle lastpull == nil --- cmd/crowdsec-cli/bouncers.go | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index f87d702a364..085c7da0c11 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -137,7 +137,7 @@ func newBouncerInfo(b *ent.Bouncer) bouncerInfo { LastPull: b.LastPull, AuthType: b.AuthType, OS: b.GetOSNameAndVersion(), - // XXX: Featureflags: strings.Split(b.FeatureFlags, ","), + Featureflags: strings.Split(b.Featureflags, ","), } } @@ -419,6 +419,11 @@ func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { {Number: 1, AutoMerge: true}, }) + lastPull := "" + if bouncer.LastPull != nil { + lastPull = bouncer.LastPull.String() + } + t.AppendRows([]table.Row{ {"Created At", bouncer.CreatedAt}, {"Last Update", bouncer.UpdatedAt}, @@ -426,15 +431,14 @@ func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { {"IP Address", bouncer.IPAddress}, {"Type", bouncer.Type}, {"Version", bouncer.Version}, - {"Last Pull", bouncer.LastPull}, + {"Last Pull", lastPull}, 
{"Auth type", bouncer.AuthType}, {"OS", bouncer.GetOSNameAndVersion()}, - // XXX: Featureflags: strings.Split(b.FeatureFlags, ","), }) - // for _, ff := range strings.Split(bouncer.Featureflags, ",") { - // t.AppendRow(table.Row{"Feature Flags", ff}) - // } + for _, ff := range strings.Split(bouncer.Featureflags, ",") { + t.AppendRow(table.Row{"Feature Flags", ff}) + } fmt.Fprintln(out, t.Render()) } From 450387e87fd43adb2d56096e880f8f61c33e0b8b Mon Sep 17 00:00:00 2001 From: marco Date: Wed, 3 Jul 2024 15:02:44 +0200 Subject: [PATCH 050/119] feature flags --- cmd/crowdsec-cli/bouncers.go | 4 ++-- cmd/crowdsec-cli/machines.go | 4 ++-- pkg/database/ent/helpers.go | 37 ++++++++++++++++++++++++++++++++++-- 3 files changed, 39 insertions(+), 6 deletions(-) diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index 085c7da0c11..0673473d72a 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -137,7 +137,7 @@ func newBouncerInfo(b *ent.Bouncer) bouncerInfo { LastPull: b.LastPull, AuthType: b.AuthType, OS: b.GetOSNameAndVersion(), - Featureflags: strings.Split(b.Featureflags, ","), + Featureflags: b.GetFeatureFlagList(), } } @@ -436,7 +436,7 @@ func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { {"OS", bouncer.GetOSNameAndVersion()}, }) - for _, ff := range strings.Split(bouncer.Featureflags, ",") { + for _, ff := range bouncer.GetFeatureFlagList() { t.AppendRow(table.Row{"Feature Flags", ff}) } diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 8fccfa40ad7..155d7679833 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -237,7 +237,7 @@ func newMachineInfo(m *ent.Machine) machineInfo { IsValidated: m.IsValidated, AuthType: m.AuthType, OS: m.GetOSNameAndVersion(), - Featureflags: strings.Split(m.Featureflags, ","), + Featureflags: m.GetFeatureFlagList(), Datasources: m.Datasources, } } @@ -649,7 +649,7 @@ func (*cliMachines) inspectHuman(out 
io.Writer, machine *ent.Machine) { t.AppendRow(table.Row{"Datasources", fmt.Sprintf("%s: %d", dsName, dsCount)}) } - for _, ff := range strings.Split(machine.Featureflags, ",") { + for _, ff := range machine.GetFeatureFlagList() { t.AppendRow(table.Row{"Feature Flags", ff}) } diff --git a/pkg/database/ent/helpers.go b/pkg/database/ent/helpers.go index 5260e760f0a..0c19fe76f0a 100644 --- a/pkg/database/ent/helpers.go +++ b/pkg/database/ent/helpers.go @@ -1,9 +1,22 @@ package ent +import ( + "strings" +) + + +// XXX: we can DRY here + func (m *Machine) GetOSNameAndVersion() string { ret := m.Osname if m.Osversion != "" { - ret += "/" + m.Osversion + if ret != "" { + ret += "/" + } + ret += m.Osversion + } + if ret == "" { + return "?" } return ret } @@ -11,7 +24,27 @@ func (m *Machine) GetOSNameAndVersion() string { func (b *Bouncer) GetOSNameAndVersion() string { ret := b.Osname if b.Osversion != "" { - ret += "/" + b.Osversion + if ret != "" { + ret += "/" + } + ret += b.Osversion + } + if ret == "" { + return "?" 
} return ret } + +func (m *Machine) GetFeatureFlagList() []string { + if m.Featureflags == "" { + return nil + } + return strings.Split(m.Featureflags, ",") +} + +func (b *Bouncer) GetFeatureFlagList() []string { + if b.Featureflags == "" { + return nil + } + return strings.Split(b.Featureflags, ",") +} From 3ba3236c7a1aafcb06527ba052c6e74dfdf1ab89 Mon Sep 17 00:00:00 2001 From: marco Date: Wed, 3 Jul 2024 15:15:56 +0200 Subject: [PATCH 051/119] disable redundant-import-alias linter --- .golangci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.golangci.yml b/.golangci.yml index 66c720381de..855c73f9af3 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -143,6 +143,8 @@ linters-settings: disabled: true - name: struct-tag disabled: true + - name: redundant-import-alias + disabled: true - name: time-equal disabled: true - name: var-naming From 421ceb29ee05a234fc190ae90d5a86e8ec2f83d4 Mon Sep 17 00:00:00 2001 From: marco Date: Wed, 3 Jul 2024 16:05:14 +0200 Subject: [PATCH 052/119] order items by name before sending metrics --- cmd/crowdsec/lpmetrics.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/crowdsec/lpmetrics.go b/cmd/crowdsec/lpmetrics.go index b92c0cc4603..afb3af99fe4 100644 --- a/cmd/crowdsec/lpmetrics.go +++ b/cmd/crowdsec/lpmetrics.go @@ -46,6 +46,8 @@ func getHubState(hub *cwhub.Hub) models.HubItems { for _, itemType := range cwhub.ItemTypes { ret[itemType] = []models.HubItem{} items, _ := hub.GetInstalledItemsByType(itemType) + cwhub.SortItemSlice(items) + for _, item := range items { status := "official" if item.State.IsLocal() { From af0ef9050ad714ce24be6923b906c9e6a8e57944 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Wed, 3 Jul 2024 18:09:35 +0200 Subject: [PATCH 053/119] basic tests --- pkg/apiserver/usage_metrics_test.go | 124 ++++++++++++++++++++++++++++ pkg/database/machines.go | 17 +++- 2 files changed, 139 insertions(+), 2 deletions(-) create mode 100644 pkg/apiserver/usage_metrics_test.go diff --git 
a/pkg/apiserver/usage_metrics_test.go b/pkg/apiserver/usage_metrics_test.go new file mode 100644 index 00000000000..aa371b0735c --- /dev/null +++ b/pkg/apiserver/usage_metrics_test.go @@ -0,0 +1,124 @@ +package apiserver + +import ( + "net/http" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestLPMetrics(t *testing.T) { + tests := []struct { + name string + body string + expectedStatusCode int + expectedResponse string + authType string + }{ + { + name: "empty metrics for LP", + body: `{ + }`, + expectedStatusCode: 400, + expectedResponse: "Missing log processor data", + authType: PASSWORD, + }, + { + name: "basic metrics for LP", + body: ` +{ + "log_processors": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"], + "type": "test-bouncer", + "datasources": {"file": 42}, + "hub_items": {} + } + ] +}`, + expectedStatusCode: 201, + expectedResponse: "", + authType: PASSWORD, + }, + { + name: "wrong auth type for LP", + body: ` +{ + "log_processors": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"], + "type": "test-bouncer", + "datasources": {"file": 42}, + "hub_items": {} + } + ] +}`, + expectedStatusCode: 400, + expectedResponse: "", + authType: APIKEY, + }, + { + name: "missing OS field for LP", + body: ` +{ + "log_processors": [ + { + "version": "1.42", + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"], + "type": "test-bouncer", + "datasources": {"file": 42}, + "hub_items": {} + } + ] +}`, + expectedStatusCode: 201, + expectedResponse: "", + authType: PASSWORD, + }, + { + name: "missing datasources for LP", + body: ` +{ + "log_processors": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"], + "type": 
"test-bouncer", + "hub_items": {} + } + ] +}`, + expectedStatusCode: 422, + expectedResponse: "", + authType: PASSWORD, + }, + } + + lapi := SetupLAPITest(t) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + w := lapi.RecordResponse(t, http.MethodPost, "/v1/usage-metrics", strings.NewReader(tt.body), tt.authType) + + assert.Equal(t, tt.expectedStatusCode, w.Code) + assert.Contains(t, w.Body.String(), tt.expectedResponse) + + //TODO: check metrics inside the database + }) + } + +} diff --git a/pkg/database/machines.go b/pkg/database/machines.go index 8570609cb97..c1f15685845 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -14,6 +14,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/schema" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/crowdsecurity/go-cs-lib/ptr" ) const ( @@ -25,8 +26,20 @@ func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics models.B os := baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") - //FIXME: nil deref - heartbeat := time.Unix(*baseMetrics.Metrics[0].Meta.UtcNowTimestamp, 0) + if os == nil { + os = &models.OSversion{ + Name: ptr.Of(""), + Version: ptr.Of(""), + } + } + + var heartbeat time.Time + + if baseMetrics.Metrics == nil || len(baseMetrics.Metrics) == 0 { + heartbeat = time.Now().UTC() + } else { + heartbeat = time.Unix(*baseMetrics.Metrics[0].Meta.UtcNowTimestamp, 0) + } hubState := map[string][]schema.ItemState{} for itemType, items := range hubItems { From 8e4ad9142f65dbb74f388fb1d21195a4fb9e9d3d Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Wed, 3 Jul 2024 18:17:30 +0200 Subject: [PATCH 054/119] up --- pkg/apiserver/usage_metrics_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/apiserver/usage_metrics_test.go b/pkg/apiserver/usage_metrics_test.go index aa371b0735c..028ecf6f7db 100644 --- a/pkg/apiserver/usage_metrics_test.go +++ 
b/pkg/apiserver/usage_metrics_test.go @@ -63,7 +63,7 @@ func TestLPMetrics(t *testing.T) { ] }`, expectedStatusCode: 400, - expectedResponse: "", + expectedResponse: "Missing remediation component data", authType: APIKEY, }, { @@ -103,7 +103,7 @@ func TestLPMetrics(t *testing.T) { ] }`, expectedStatusCode: 422, - expectedResponse: "", + expectedResponse: "log_processors.0.datasources in body is required", authType: PASSWORD, }, } From af278ce89c6ec1407d47702e7bd681e2f3f7b206 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Thu, 4 Jul 2024 11:05:39 +0200 Subject: [PATCH 055/119] up --- pkg/apiserver/controllers/v1/usagemetrics.go | 8 + pkg/apiserver/usage_metrics_test.go | 299 +++++++++++++++++-- pkg/database/bouncers.go | 2 - pkg/database/machines.go | 8 - 4 files changed, 289 insertions(+), 28 deletions(-) diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go index 1c8d291b325..b0453beface 100644 --- a/pkg/apiserver/controllers/v1/usagemetrics.go +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -13,6 +13,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/go-cs-lib/ptr" ) // updateBaseMetrics updates the base metrics for a machine or bouncer @@ -145,6 +146,13 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { return } + if baseMetrics.Os == nil { + baseMetrics.Os = &models.OSversion{ + Name: ptr.Of(""), + Version: ptr.Of(""), + } + } + err := c.updateBaseMetrics(machineID, bouncer, baseMetrics, hubItems, datasources) if err != nil { logger.Errorf("Failed to update base metrics: %s", err) diff --git a/pkg/apiserver/usage_metrics_test.go b/pkg/apiserver/usage_metrics_test.go index 028ecf6f7db..19942a5fcf1 100644 --- a/pkg/apiserver/usage_metrics_test.go +++ b/pkg/apiserver/usage_metrics_test.go @@ -1,20 +1,27 @@ package apiserver import ( + "context" 
"net/http" "strings" "testing" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" "github.com/stretchr/testify/assert" ) func TestLPMetrics(t *testing.T) { tests := []struct { - name string - body string - expectedStatusCode int - expectedResponse string - authType string + name string + body string + expectedStatusCode int + expectedResponse string + expectedMetricsCount int + expectedOSName string + expectedOSVersion string + expectedFeatureFlags string + authType string }{ { name: "empty metrics for LP", @@ -25,7 +32,7 @@ func TestLPMetrics(t *testing.T) { authType: PASSWORD, }, { - name: "basic metrics for LP", + name: "basic metrics with empty dynamic metrics for LP", body: ` { "log_processors": [ @@ -35,15 +42,42 @@ func TestLPMetrics(t *testing.T) { "utc_startup_timestamp": 42, "metrics": [], "feature_flags": ["a", "b", "c"], - "type": "test-bouncer", "datasources": {"file": 42}, "hub_items": {} } ] }`, - expectedStatusCode: 201, - expectedResponse: "", - authType: PASSWORD, + expectedStatusCode: 201, + expectedMetricsCount: 1, + expectedResponse: "", + expectedOSName: "foo", + expectedOSVersion: "42", + expectedFeatureFlags: "a,b,c", + authType: PASSWORD, + }, + { + name: "basic metrics with dynamic metrics for LP", + body: ` +{ + "log_processors": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [{"meta":{"utc_now_timestamp":42, "window_size_seconds": 42}, "items": [{"name": "foo", "value": 42, "unit": "bla"}] }, {"meta":{"utc_now_timestamp":43, "window_size_seconds": 42}, "items": [{"name": "foo", "value": 42, "unit": "bla"}] }], + "feature_flags": ["a", "b", "c"], + "datasources": {"file": 42}, + "hub_items": {} + } + ] +}`, + expectedStatusCode: 201, + expectedMetricsCount: 1, + expectedResponse: "", + expectedOSName: "foo", + expectedOSVersion: "42", + expectedFeatureFlags: "a,b,c", + authType: PASSWORD, }, { name: "wrong auth 
type for LP", @@ -56,7 +90,6 @@ func TestLPMetrics(t *testing.T) { "utc_startup_timestamp": 42, "metrics": [], "feature_flags": ["a", "b", "c"], - "type": "test-bouncer", "datasources": {"file": 42}, "hub_items": {} } @@ -76,15 +109,16 @@ func TestLPMetrics(t *testing.T) { "utc_startup_timestamp": 42, "metrics": [], "feature_flags": ["a", "b", "c"], - "type": "test-bouncer", "datasources": {"file": 42}, "hub_items": {} } ] }`, - expectedStatusCode: 201, - expectedResponse: "", - authType: PASSWORD, + expectedStatusCode: 201, + expectedResponse: "", + expectedMetricsCount: 1, + expectedFeatureFlags: "a,b,c", + authType: PASSWORD, }, { name: "missing datasources for LP", @@ -97,7 +131,6 @@ func TestLPMetrics(t *testing.T) { "utc_startup_timestamp": 42, "metrics": [], "feature_flags": ["a", "b", "c"], - "type": "test-bouncer", "hub_items": {} } ] @@ -106,18 +139,248 @@ func TestLPMetrics(t *testing.T) { expectedResponse: "log_processors.0.datasources in body is required", authType: PASSWORD, }, + { + name: "missing feature flags for LP", + body: ` +{ + "log_processors": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "datasources": {"file": 42}, + "hub_items": {} } + ] +}`, + expectedStatusCode: 201, + expectedMetricsCount: 1, + expectedOSName: "foo", + expectedOSVersion: "42", + authType: PASSWORD, + }, + { + name: "missing OS name", + body: ` +{ + "log_processors": [ + { + "version": "1.42", + "os": {"version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"], + "datasources": {"file": 42}, + "hub_items": {} + } + ] +}`, + expectedStatusCode: 422, + expectedResponse: "log_processors.0.os.name in body is required", + authType: PASSWORD, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + lapi := SetupLAPITest(t) + + dbClient, err := database.NewClient(context.Background(), lapi.DBConfig) + if err != nil { + t.Fatalf("unable to 
create database client: %s", err) + } + + w := lapi.RecordResponse(t, http.MethodPost, "/v1/usage-metrics", strings.NewReader(tt.body), tt.authType) + + assert.Equal(t, tt.expectedStatusCode, w.Code) + assert.Contains(t, w.Body.String(), tt.expectedResponse) + + machine, _ := dbClient.QueryMachineByID("test") + metrics, _ := dbClient.GetLPUsageMetricsByMachineID("test") + + assert.Equal(t, tt.expectedMetricsCount, len(metrics)) + assert.Equal(t, tt.expectedOSName, machine.Osname) + assert.Equal(t, tt.expectedOSVersion, machine.Osversion) + assert.Equal(t, tt.expectedFeatureFlags, machine.Featureflags) + + if len(metrics) > 0 { + assert.Equal(t, "test", metrics[0].GeneratedBy) + assert.Equal(t, metric.GeneratedType("LP"), metrics[0].GeneratedType) + } + }) + } + +} - lapi := SetupLAPITest(t) +func TestRCMetrics(t *testing.T) { + tests := []struct { + name string + body string + expectedStatusCode int + expectedResponse string + expectedMetricsCount int + expectedOSName string + expectedOSVersion string + expectedFeatureFlags string + authType string + }{ + { + name: "empty metrics for RC", + body: `{ + }`, + expectedStatusCode: 400, + expectedResponse: "Missing remediation component data", + authType: APIKEY, + }, + { + name: "basic metrics with empty dynamic metrics for RC", + body: ` +{ + "remediation_components": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"] + } + ] +}`, + expectedStatusCode: 201, + expectedMetricsCount: 1, + expectedResponse: "", + expectedOSName: "foo", + expectedOSVersion: "42", + expectedFeatureFlags: "a,b,c", + authType: APIKEY, + }, + { + name: "basic metrics with dynamic metrics for RC", + body: ` +{ + "remediation_components": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [{"meta":{"utc_now_timestamp":42, "window_size_seconds": 42}, "items": [{"name": "foo", "value": 42, 
"unit": "bla"}] }, {"meta":{"utc_now_timestamp":43, "window_size_seconds": 42}, "items": [{"name": "foo", "value": 42, "unit": "bla"}] }], + "feature_flags": ["a", "b", "c"] + } + ] +}`, + expectedStatusCode: 201, + expectedMetricsCount: 1, + expectedResponse: "", + expectedOSName: "foo", + expectedOSVersion: "42", + expectedFeatureFlags: "a,b,c", + authType: APIKEY, + }, + { + name: "wrong auth type for RC", + body: ` +{ + "remediation_components": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"] + } + ] +}`, + expectedStatusCode: 400, + expectedResponse: "Missing log processor data", + authType: PASSWORD, + }, + { + name: "missing OS field for RC", + body: ` +{ + "remediation_components": [ + { + "version": "1.42", + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"] + } + ] +}`, + expectedStatusCode: 201, + expectedResponse: "", + expectedMetricsCount: 1, + expectedFeatureFlags: "a,b,c", + authType: APIKEY, + }, + { + name: "missing feature flags for RC", + body: ` +{ + "remediation_components": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [] + } + ] +}`, + expectedStatusCode: 201, + expectedMetricsCount: 1, + expectedOSName: "foo", + expectedOSVersion: "42", + authType: APIKEY, + }, + { + name: "missing OS name", + body: ` +{ + "remediation_components": [ + { + "version": "1.42", + "os": {"version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"] + } + ] +}`, + expectedStatusCode: 422, + expectedResponse: "remediation_components.0.os.name in body is required", + authType: APIKEY, + }, + } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + + lapi := SetupLAPITest(t) + + dbClient, err := database.NewClient(context.Background(), lapi.DBConfig) + if err != nil { + t.Fatalf("unable to create database client: %s", 
err) + } + w := lapi.RecordResponse(t, http.MethodPost, "/v1/usage-metrics", strings.NewReader(tt.body), tt.authType) assert.Equal(t, tt.expectedStatusCode, w.Code) assert.Contains(t, w.Body.String(), tt.expectedResponse) - //TODO: check metrics inside the database + bouncer, _ := dbClient.SelectBouncerByName("test") + metrics, _ := dbClient.GetBouncerUsageMetricsByName("test") + + assert.Equal(t, tt.expectedMetricsCount, len(metrics)) + assert.Equal(t, tt.expectedOSName, bouncer.Osname) + assert.Equal(t, tt.expectedOSVersion, bouncer.Osversion) + assert.Equal(t, tt.expectedFeatureFlags, bouncer.Featureflags) + + if len(metrics) > 0 { + assert.Equal(t, "test", metrics[0].GeneratedBy) + assert.Equal(t, metric.GeneratedType("RC"), metrics[0].GeneratedType) + } }) } diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go index 77cddd78152..79e0cef3df1 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -16,8 +16,6 @@ func (c *Client) BouncerUpdateBaseMetrics(bouncerName string, bouncerType string os := baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") - // XXX: bouncers have no heartbeat, they have "last pull", are we updating it? - _, err := c.Ent.Bouncer. Update(). Where(bouncer.NameEQ(bouncerName)). 
diff --git a/pkg/database/machines.go b/pkg/database/machines.go index c1f15685845..c6325b08260 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -14,7 +14,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/schema" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/crowdsecurity/go-cs-lib/ptr" ) const ( @@ -26,13 +25,6 @@ func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics models.B os := baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") - if os == nil { - os = &models.OSversion{ - Name: ptr.Of(""), - Version: ptr.Of(""), - } - } - var heartbeat time.Time if baseMetrics.Metrics == nil || len(baseMetrics.Metrics) == 0 { From 9fb1d40b7e6e97b37772e7bc45460e18f0a9abe1 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Thu, 4 Jul 2024 11:18:29 +0200 Subject: [PATCH 056/119] up --- pkg/apiserver/usage_metrics_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/apiserver/usage_metrics_test.go b/pkg/apiserver/usage_metrics_test.go index 19942a5fcf1..aa2b2a532e8 100644 --- a/pkg/apiserver/usage_metrics_test.go +++ b/pkg/apiserver/usage_metrics_test.go @@ -200,7 +200,7 @@ func TestLPMetrics(t *testing.T) { machine, _ := dbClient.QueryMachineByID("test") metrics, _ := dbClient.GetLPUsageMetricsByMachineID("test") - assert.Equal(t, tt.expectedMetricsCount, len(metrics)) + assert.Len(t, metrics, tt.expectedMetricsCount) assert.Equal(t, tt.expectedOSName, machine.Osname) assert.Equal(t, tt.expectedOSVersion, machine.Osversion) assert.Equal(t, tt.expectedFeatureFlags, machine.Featureflags) @@ -372,7 +372,7 @@ func TestRCMetrics(t *testing.T) { bouncer, _ := dbClient.SelectBouncerByName("test") metrics, _ := dbClient.GetBouncerUsageMetricsByName("test") - assert.Equal(t, tt.expectedMetricsCount, len(metrics)) + assert.Len(t, metrics, tt.expectedMetricsCount) assert.Equal(t, tt.expectedOSName, bouncer.Osname) 
assert.Equal(t, tt.expectedOSVersion, bouncer.Osversion) assert.Equal(t, tt.expectedFeatureFlags, bouncer.Featureflags) From 7431b52f06f4d105fa530dba3fcf4d5ac31dcc0d Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Thu, 4 Jul 2024 11:24:51 +0200 Subject: [PATCH 057/119] up --- pkg/apiserver/apic_metrics.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 019ddf982bb..6d7b03a02fd 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -4,8 +4,8 @@ import ( "context" "encoding/json" "net/http" - "strings" "slices" + "strings" "time" "github.com/blackfireio/osinfo" @@ -120,13 +120,15 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { hubItems[itemType] = []models.HubItem{} for _, item := range items { hubItems[itemType] = append(hubItems[itemType], models.HubItem{ - Name: item.Name, - Status: item.Status, + Name: item.Name, + Status: item.Status, Version: item.Version, }) } lpMetrics.HubItems = hubItems } + } else { + lpMetrics.HubItems = models.HubItems{} } lpMetrics.Metrics = make([]*models.DetailedMetrics, 0) From 636ce5c94301e78273f1ccd3357cccc2490f424d Mon Sep 17 00:00:00 2001 From: marco Date: Thu, 4 Jul 2024 09:06:17 +0200 Subject: [PATCH 058/119] dry --- cmd/crowdsec-cli/bouncers.go | 42 +++++++++++++++++++++++--- cmd/crowdsec-cli/machines.go | 10 +++---- pkg/database/ent/helpers.go | 58 +++++++++--------------------------- 3 files changed, 57 insertions(+), 53 deletions(-) diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index 0673473d72a..8c041ef462b 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -26,6 +26,40 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) +type featureflagProvider interface { + GetFeatureflags() string +} + +type osProvider interface { + GetOsname() string + GetOsversion() string +} + +func getOSNameAndVersion(o osProvider) string 
{ + ret := o.GetOsname() + if o.GetOsversion() != "" { + if ret != "" { + ret += "/" + } + + ret += o.GetOsversion() + } + + if ret == "" { + return "?" + } + + return ret +} + +func getFeatureFlagList(o featureflagProvider) []string { + if o.GetFeatureflags() == "" { + return nil + } + + return strings.Split(o.GetFeatureflags(), ",") +} + func askYesNo(message string, defaultAnswer bool) (bool, error) { var answer bool @@ -136,8 +170,8 @@ func newBouncerInfo(b *ent.Bouncer) bouncerInfo { Version: b.Version, LastPull: b.LastPull, AuthType: b.AuthType, - OS: b.GetOSNameAndVersion(), - Featureflags: b.GetFeatureFlagList(), + OS: getOSNameAndVersion(b), + Featureflags: getFeatureFlagList(b), } } @@ -433,10 +467,10 @@ func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { {"Version", bouncer.Version}, {"Last Pull", lastPull}, {"Auth type", bouncer.AuthType}, - {"OS", bouncer.GetOSNameAndVersion()}, + {"OS", getOSNameAndVersion(bouncer)}, }) - for _, ff := range bouncer.GetFeatureFlagList() { + for _, ff := range getFeatureFlagList(bouncer) { t.AppendRow(table.Row{"Feature Flags", ff}) } diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 8796d3de9b8..67d7d7d4da7 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -202,7 +202,7 @@ func (cli *cliMachines) listHuman(out io.Writer, machines ent.Machines) { hb = emoji.Warning + " " + hb } - t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.GetOSNameAndVersion(), m.AuthType, hb}) + t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, getOSNameAndVersion(m), m.AuthType, hb}) } fmt.Fprintln(out, t.Render()) @@ -235,8 +235,8 @@ func newMachineInfo(m *ent.Machine) machineInfo { Version: m.Version, IsValidated: m.IsValidated, AuthType: m.AuthType, - OS: m.GetOSNameAndVersion(), - Featureflags: m.GetFeatureFlagList(), + OS: getOSNameAndVersion(m), + 
Featureflags: getFeatureFlagList(m), Datasources: m.Datasources, } } @@ -641,7 +641,7 @@ func (*cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { {"Last Heartbeat", machine.LastHeartbeat}, {"Validated?", machine.IsValidated}, {"CrowdSec version", machine.Version}, - {"OS", machine.GetOSNameAndVersion()}, + {"OS", getOSNameAndVersion(machine)}, {"Auth type", machine.AuthType}, }) @@ -649,7 +649,7 @@ func (*cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { t.AppendRow(table.Row{"Datasources", fmt.Sprintf("%s: %d", dsName, dsCount)}) } - for _, ff := range machine.GetFeatureFlagList() { + for _, ff := range getFeatureFlagList(machine) { t.AppendRow(table.Row{"Feature Flags", ff}) } diff --git a/pkg/database/ent/helpers.go b/pkg/database/ent/helpers.go index c6cdbd7f32b..9b30ce451e0 100644 --- a/pkg/database/ent/helpers.go +++ b/pkg/database/ent/helpers.go @@ -1,55 +1,25 @@ package ent -import ( - "strings" -) - -func (m *Machine) GetOSNameAndVersion() string { - ret := m.Osname - if m.Osversion != "" { - if ret != "" { - ret += "/" - } - - ret += m.Osversion - } - - if ret == "" { - return "?" - } - - return ret +func (m *Machine) GetOsname() string { + return m.Osname } -func (b *Bouncer) GetOSNameAndVersion() string { - ret := b.Osname - if b.Osversion != "" { - if ret != "" { - ret += "/" - } - - ret += b.Osversion - } - - if ret == "" { - return "?" 
- } - - return ret +func (b *Bouncer) GetOsname() string { + return b.Osname } -func (m *Machine) GetFeatureFlagList() []string { - if m.Featureflags == "" { - return nil - } +func (m *Machine) GetOsversion() string { + return m.Osversion +} - return strings.Split(m.Featureflags, ",") +func (b *Bouncer) GetOsversion() string { + return b.Osversion } -func (b *Bouncer) GetFeatureFlagList() []string { - if b.Featureflags == "" { - return nil - } +func (m *Machine) GetFeatureflags() string { + return m.Featureflags +} - return strings.Split(b.Featureflags, ",") +func (b *Bouncer) GetFeatureflags() string { + return b.Featureflags } From b5776471fcb278c5266ffe98afd30c26950ea0a0 Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Fri, 5 Jul 2024 17:51:44 +0200 Subject: [PATCH 059/119] send metrics every 30 minutes to CAPI --- pkg/apiserver/apic_metrics.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 6d7b03a02fd..3279432973a 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -344,7 +344,9 @@ func (a *apic) SendMetrics(stop chan (bool)) { func (a *apic) SendUsageMetrics() { defer trace.CatchPanic("lapi/usageMetricsToAPIC") - ticker := time.NewTicker(5 * time.Second) + firstRun := true + + ticker := time.NewTicker(time.Millisecond) for { select { @@ -353,6 +355,10 @@ func (a *apic) SendUsageMetrics() { ticker.Stop() return case <-ticker.C: + if firstRun { + firstRun = false + ticker.Reset(30 * time.Minute) + } metrics, metricsId, err := a.GetUsageMetrics() if err != nil { log.Errorf("unable to get usage metrics: %s", err) From 66ad913f81be1a0e54d57160ecb3baa904736deb Mon Sep 17 00:00:00 2001 From: marco Date: Thu, 4 Jul 2024 17:31:51 +0200 Subject: [PATCH 060/119] sort type names --- cmd/crowdsec-cli/metrics/store.go | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/cmd/crowdsec-cli/metrics/store.go 
b/cmd/crowdsec-cli/metrics/store.go index 48926488c07..55588ff148f 100644 --- a/cmd/crowdsec-cli/metrics/store.go +++ b/cmd/crowdsec-cli/metrics/store.go @@ -28,17 +28,17 @@ type metricStore map[string]metricSection func NewMetricStore() metricStore { return metricStore{ "acquisition": statAcquis{}, - "scenarios": statBucket{}, - "parsers": statParser{}, + "alerts": statAlert{}, + "appsec-engine": statAppsecEngine{}, + "appsec-rule": statAppsecRule{}, + "decisions": statDecision{}, "lapi": statLapi{}, - "lapi-machine": statLapiMachine{}, "lapi-bouncer": statLapiBouncer{}, "lapi-decisions": statLapiDecision{}, - "decisions": statDecision{}, - "alerts": statAlert{}, + "lapi-machine": statLapiMachine{}, + "parsers": statParser{}, + "scenarios": statBucket{}, "stash": statStash{}, - "appsec-engine": statAppsecEngine{}, - "appsec-rule": statAppsecRule{}, "whitelists": statWhitelist{}, } } @@ -78,16 +78,16 @@ func (ms metricStore) Fetch(url string) error { /*walk*/ mAcquis := ms["acquisition"].(statAcquis) - mParser := ms["parsers"].(statParser) - mBucket := ms["scenarios"].(statBucket) + mAlert := ms["alerts"].(statAlert) + mAppsecEngine := ms["appsec-engine"].(statAppsecEngine) + mAppsecRule := ms["appsec-rule"].(statAppsecRule) + mDecision := ms["decisions"].(statDecision) mLapi := ms["lapi"].(statLapi) - mLapiMachine := ms["lapi-machine"].(statLapiMachine) mLapiBouncer := ms["lapi-bouncer"].(statLapiBouncer) mLapiDecision := ms["lapi-decisions"].(statLapiDecision) - mDecision := ms["decisions"].(statDecision) - mAppsecEngine := ms["appsec-engine"].(statAppsecEngine) - mAppsecRule := ms["appsec-rule"].(statAppsecRule) - mAlert := ms["alerts"].(statAlert) + mLapiMachine := ms["lapi-machine"].(statLapiMachine) + mParser := ms["parsers"].(statParser) + mBucket := ms["scenarios"].(statBucket) mStash := ms["stash"].(statStash) mWhitelist := ms["whitelists"].(statWhitelist) From 90e60ecd777b1c315209d893b9ea9ced50927025 Mon Sep 17 00:00:00 2001 From: marco Date: Fri, 5 Jul 
2024 09:31:03 +0200 Subject: [PATCH 061/119] explicit soft wrap --- cmd/crowdsec-cli/metrics/list.go | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/cmd/crowdsec-cli/metrics/list.go b/cmd/crowdsec-cli/metrics/list.go index ba68aa4b64d..6f61b77a8c0 100644 --- a/cmd/crowdsec-cli/metrics/list.go +++ b/cmd/crowdsec-cli/metrics/list.go @@ -8,6 +8,9 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/jedib0t/go-pretty/v6/table" + "github.com/jedib0t/go-pretty/v6/text" + "github.com/crowdsecurity/go-cs-lib/maptools" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" @@ -34,15 +37,32 @@ func (cli *cliMetrics) list() error { switch cli.cfg().Cscli.Output { case "human": - t := cstable.New(color.Output, cli.cfg().Cscli.Color) - t.SetRowLines(true) - t.SetHeaders("Type", "Title", "Description") + out := color.Output + t := cstable.New(out, cli.cfg().Cscli.Color).Writer + t.AppendHeader(table.Row{"Type", "Title", "Description"}) + t.SetColumnConfigs([]table.ColumnConfig{ + { + Name: "Type", + AlignHeader: text.AlignCenter, + }, + { + Name: "Title", + AlignHeader: text.AlignCenter, + }, + { + Name: "Description", + AlignHeader: text.AlignCenter, + WidthMax: 60, + WidthMaxEnforcer: text.WrapSoft, + }, + }) + t.Style().Options.SeparateRows = true for _, metric := range allMetrics { - t.AddRow(metric.Type, metric.Title, metric.Description) + t.AppendRow(table.Row{metric.Type, metric.Title, metric.Description}) } - t.Render() + fmt.Fprintln(out, t.Render()) case "json": x, err := json.MarshalIndent(allMetrics, "", " ") if err != nil { From 7f923b05effdecae5879b26d36788e1eccf4b696 Mon Sep 17 00:00:00 2001 From: marco Date: Fri, 5 Jul 2024 09:46:03 +0200 Subject: [PATCH 062/119] extract method processMetrics() --- cmd/crowdsec-cli/metrics/store.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/crowdsec-cli/metrics/store.go b/cmd/crowdsec-cli/metrics/store.go index 
55588ff148f..5e2e85b7c57 100644 --- a/cmd/crowdsec-cli/metrics/store.go +++ b/cmd/crowdsec-cli/metrics/store.go @@ -75,8 +75,10 @@ func (ms metricStore) Fetch(url string) error { } log.Debugf("Finished reading metrics output, %d entries", len(result)) - /*walk*/ + return ms.processMetrics(result) +} +func (ms metricStore) processMetrics(result []*prom2json.Family) error { mAcquis := ms["acquisition"].(statAcquis) mAlert := ms["alerts"].(statAlert) mAppsecEngine := ms["appsec-engine"].(statAppsecEngine) From e55f3a80a9edb6430ac03289b78ab046b3f5e612 Mon Sep 17 00:00:00 2001 From: marco Date: Fri, 5 Jul 2024 12:28:22 +0200 Subject: [PATCH 063/119] statbouncer.go --- cmd/crowdsec-cli/metrics/metrics.go | 4 +- cmd/crowdsec-cli/metrics/show.go | 23 ++- cmd/crowdsec-cli/metrics/statbouncer.go | 182 ++++++++++++++++++++++++ cmd/crowdsec-cli/metrics/store.go | 22 ++- cmd/crowdsec-cli/support.go | 6 +- pkg/database/database.go | 2 +- 6 files changed, 223 insertions(+), 16 deletions(-) create mode 100644 cmd/crowdsec-cli/metrics/statbouncer.go diff --git a/cmd/crowdsec-cli/metrics/metrics.go b/cmd/crowdsec-cli/metrics/metrics.go index 52d623dc37e..082fce20ee2 100644 --- a/cmd/crowdsec-cli/metrics/metrics.go +++ b/cmd/crowdsec-cli/metrics/metrics.go @@ -38,8 +38,8 @@ cscli metrics --url http://lapi.local:6060/metrics show acquisition parsers cscli metrics list`, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.show(nil, url, noUnit) + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.show(cmd.Context(), nil, url, noUnit) }, } diff --git a/cmd/crowdsec-cli/metrics/show.go b/cmd/crowdsec-cli/metrics/show.go index 46603034f24..a3940fded16 100644 --- a/cmd/crowdsec-cli/metrics/show.go +++ b/cmd/crowdsec-cli/metrics/show.go @@ -1,11 +1,16 @@ package metrics import ( + "context" "errors" "fmt" + log "github.com/sirupsen/logrus" + "github.com/fatih/color" "github.com/spf13/cobra" + + 
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" ) var ( @@ -13,7 +18,7 @@ var ( ErrMetricsDisabled = errors.New("prometheus is not enabled, can't show metrics") ) -func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { +func (cli *cliMetrics) show(ctx context.Context, sections []string, url string, noUnit bool) error { cfg := cli.cfg() if url != "" { @@ -30,8 +35,16 @@ func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { ms := NewMetricStore() - if err := ms.Fetch(cfg.Cscli.PrometheusUrl); err != nil { - return err + // XXX: only on lapi + db, err := require.DBClient(ctx, cfg.DbConfig) + if err != nil { + // XXX how to handle this - if we are not on lapi, etc. + // we may read lp metrics without lapi? + log.Warnf("unable to open database: %s", err) + } + + if err := ms.Fetch(ctx, cfg.Cscli.PrometheusUrl, db); err != nil { + log.Warnf("unable to fetch metrics: %s", err) } // any section that we don't have in the store is an error @@ -90,9 +103,9 @@ cscli metrics list; cscli metrics list -o json cscli metrics show acquisition parsers scenarios stash -o json`, // Positional args are optional DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, args []string) error { args = expandAlias(args) - return cli.show(args, url, noUnit) + return cli.show(cmd.Context(), args, url, noUnit) }, } diff --git a/cmd/crowdsec-cli/metrics/statbouncer.go b/cmd/crowdsec-cli/metrics/statbouncer.go new file mode 100644 index 00000000000..b9b77bda471 --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statbouncer.go @@ -0,0 +1,182 @@ +package metrics + +import ( + "context" + "encoding/json" + "fmt" + "io" + "strconv" + "time" + + "github.com/jedib0t/go-pretty/v6/text" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" + "github.com/crowdsecurity/crowdsec/pkg/database" + 
"github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +// un-aggregated data, de-normalized. +type bouncerMetricItem struct { + bouncerName string + ipType string + origin string + name string + unit string + value float64 +} + +type statBouncer struct { + // we keep de-normalized metrics so we can iterate + // over them multiple times and keep the aggregation code simple + rawMetrics []bouncerMetricItem + aggregated map[string]map[string]map[string]int64 +} + +func (s *statBouncer) Description() (string, string) { + return "Bouncer Metrics", + `Network traffic blocked by bouncers.` +} + +func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { + if db == nil { + return nil + } + + // query all bouncer metrics that have not been flushed + + metrics, err := db.Ent.Metric.Query(). + Where( + metric.GeneratedTypeEQ(metric.GeneratedTypeRC), + ).All(ctx) + if err != nil { + return fmt.Errorf("unable to fetch metrics: %w", err) + } + + // keep track of oldest collection timestamp + var since *time.Time + + for i, met := range metrics { + collectedAt := met.CollectedAt + if since == nil || collectedAt.Before(*since) { + since = &collectedAt + } + + bouncerName := met.GeneratedBy + + type bouncerMetrics struct { + Metrics []models.DetailedMetrics `json:"metrics"` + } + + payload := bouncerMetrics{} + + err := json.Unmarshal([]byte(met.Payload), &payload) + if err != nil { + log.Warningf("while parsing metrics: %s", err) + } + + fmt.Printf("row %d, %s, %+v\n", i, bouncerName, payload) + + for _, m := range payload.Metrics { + for _, item := range m.Items { + labels := item.Labels + + // these are mandatory but we got pointers, so... 
+ // XXX: but we should only print these once, even for repeated offenses + + if item.Name == nil { + log.Warningf("missing 'name' field in metrics reported by %s", bouncerName) + continue + } + name := *item.Name + + if item.Unit == nil { + log.Warningf("missing 'unit' field in metrics reported by %s", bouncerName) + continue + } + unit := *item.Unit + + if item.Value == nil { + log.Warningf("missing 'value' field in metrics reported by %s", bouncerName) + continue + } + value := *item.Value + + rawMetric := bouncerMetricItem{ + bouncerName: bouncerName, + ipType: labels["ip_type"], + origin: labels["origin"], + name: name, + unit: unit, + value: value, + } + + fmt.Printf("raw: %v\n", rawMetric) + + s.rawMetrics = append(s.rawMetrics, rawMetric) + } + } + } + + s.aggregate() + + return nil +} + +func (s *statBouncer) aggregate() { + // [bouncer][origin][name]value + + // XXX: how about blocked ips? + + if s.aggregated == nil { + s.aggregated = make(map[string]map[string]map[string]int64) + } + + for _, raw := range s.rawMetrics { + if _, ok := s.aggregated[raw.bouncerName]; !ok { + s.aggregated[raw.bouncerName] = make(map[string]map[string]int64) + } + + if _, ok := s.aggregated[raw.bouncerName][raw.origin]; !ok { + s.aggregated[raw.bouncerName][raw.origin] = make(map[string]int64) + } + + if _, ok := s.aggregated[raw.bouncerName][raw.origin][raw.name]; !ok { + s.aggregated[raw.bouncerName][raw.origin][raw.name] = 0 + } + + s.aggregated[raw.bouncerName][raw.origin][raw.name] += int64(raw.value) + } +} + +func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + bouncerNames := make(map[string]bool) + for _, item := range s.rawMetrics { + bouncerNames[item.bouncerName] = true + } + + // [bouncer][origin]; where origin=="" is the total + + for bouncerName := range bouncerNames { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("", "Bytes", "Packets") + t.SetAlignment(text.AlignLeft, text.AlignLeft, 
text.AlignLeft) + // XXX: noUnit, showEmpty + // XXX: total of all origins + // XXX: blocked_ips and other metrics + // XXX: -o json + + for origin, metrics := range s.aggregated[bouncerName] { + t.AddRow(origin, + strconv.FormatInt(metrics["dropped_bytes"], 10), + strconv.FormatInt(metrics["dropped_packets"], 10), + ) + } + title, _ := s.Description() + cstable.RenderTitle(out, fmt.Sprintf("\n%s (%s):", title, bouncerName)) + t.Render() + } + +} diff --git a/cmd/crowdsec-cli/metrics/store.go b/cmd/crowdsec-cli/metrics/store.go index 5e2e85b7c57..9c4250b1d7e 100644 --- a/cmd/crowdsec-cli/metrics/store.go +++ b/cmd/crowdsec-cli/metrics/store.go @@ -1,6 +1,7 @@ package metrics import ( + "context" "encoding/json" "fmt" "io" @@ -16,6 +17,8 @@ import ( "github.com/crowdsecurity/go-cs-lib/maptools" "github.com/crowdsecurity/go-cs-lib/trace" + + "github.com/crowdsecurity/crowdsec/pkg/database" ) type metricSection interface { @@ -29,6 +32,7 @@ func NewMetricStore() metricStore { return metricStore{ "acquisition": statAcquis{}, "alerts": statAlert{}, + "bouncers": &statBouncer{}, "appsec-engine": statAppsecEngine{}, "appsec-rule": statAppsecRule{}, "decisions": statDecision{}, @@ -43,7 +47,15 @@ func NewMetricStore() metricStore { } } -func (ms metricStore) Fetch(url string) error { +func (ms metricStore) Fetch(ctx context.Context, url string, db *database.Client) error { + if err := ms["bouncers"].(*statBouncer).Fetch(ctx, db); err != nil { + return err + } + + return ms.fetchPrometheusMetrics(url) +} + +func (ms metricStore) fetchPrometheusMetrics(url string) error { mfChan := make(chan *dto.MetricFamily, 1024) errChan := make(chan error, 1) @@ -75,10 +87,12 @@ func (ms metricStore) Fetch(url string) error { } log.Debugf("Finished reading metrics output, %d entries", len(result)) - return ms.processMetrics(result) + ms.processPrometheusMetrics(result) + + return nil } -func (ms metricStore) processMetrics(result []*prom2json.Family) error { +func (ms metricStore) 
processPrometheusMetrics(result []*prom2json.Family) { mAcquis := ms["acquisition"].(statAcquis) mAlert := ms["alerts"].(statAlert) mAppsecEngine := ms["appsec-engine"].(statAppsecEngine) @@ -221,8 +235,6 @@ func (ms metricStore) processMetrics(result []*prom2json.Family) error { } } } - - return nil } func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, formatType string, noUnit bool) error { diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 1b33ef38ada..55c8516e605 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -78,7 +78,7 @@ func stripAnsiString(str string) string { return reStripAnsi.ReplaceAllString(str, "") } -func (cli *cliSupport) dumpMetrics(ctx context.Context, zw *zip.Writer) error { +func (cli *cliSupport) dumpMetrics(ctx context.Context, db *database.Client, zw *zip.Writer) error { log.Info("Collecting prometheus metrics") cfg := cli.cfg() @@ -91,7 +91,7 @@ func (cli *cliSupport) dumpMetrics(ctx context.Context, zw *zip.Writer) error { ms := metrics.NewMetricStore() - if err := ms.Fetch(cfg.Cscli.PrometheusUrl); err != nil { + if err := ms.Fetch(ctx, cfg.Cscli.PrometheusUrl, db); err != nil { return err } @@ -493,7 +493,7 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { skipCAPI = true } - if err = cli.dumpMetrics(ctx, zipWriter); err != nil { + if err = cli.dumpMetrics(ctx, db, zipWriter); err != nil { log.Warn(err) } diff --git a/pkg/database/database.go b/pkg/database/database.go index 6f392c46d21..e18c4025fe7 100644 --- a/pkg/database/database.go +++ b/pkg/database/database.go @@ -38,7 +38,7 @@ func getEntDriver(dbtype string, dbdialect string, dsn string, config *csconfig. 
} if config.MaxOpenConns == nil { - log.Warningf("MaxOpenConns is 0, defaulting to %d", csconfig.DEFAULT_MAX_OPEN_CONNS) + log.Debugf("MaxOpenConns is 0, defaulting to %d", csconfig.DEFAULT_MAX_OPEN_CONNS) config.MaxOpenConns = ptr.Of(csconfig.DEFAULT_MAX_OPEN_CONNS) } From 8759e4ffd8e13396cc1d043667a2de99f3733561 Mon Sep 17 00:00:00 2001 From: marco Date: Tue, 9 Jul 2024 11:51:32 +0200 Subject: [PATCH 064/119] reduce verbosity, format byte totals, warn only once per error --- cmd/crowdsec-cli/metrics/number.go | 9 +++- cmd/crowdsec-cli/metrics/show.go | 2 +- cmd/crowdsec-cli/metrics/statbouncer.go | 65 +++++++++++++++++-------- cmd/crowdsec-cli/metrics/store.go | 2 +- cmd/crowdsec-cli/metrics/table.go | 7 +-- 5 files changed, 56 insertions(+), 29 deletions(-) diff --git a/cmd/crowdsec-cli/metrics/number.go b/cmd/crowdsec-cli/metrics/number.go index a60d3db2a9b..2efa637ae85 100644 --- a/cmd/crowdsec-cli/metrics/number.go +++ b/cmd/crowdsec-cli/metrics/number.go @@ -3,6 +3,7 @@ package metrics import ( "fmt" "math" + "strconv" ) type unit struct { @@ -20,11 +21,15 @@ var ranges = []unit{ {value: 1, symbol: ""}, } -func formatNumber(num int) string { +func formatNumber(num int64, withUnit bool) string { + if !withUnit { + return strconv.FormatInt(num, 10) + } + goodUnit := unit{} for _, u := range ranges { - if int64(num) >= u.value { + if num >= u.value { goodUnit = u break } diff --git a/cmd/crowdsec-cli/metrics/show.go b/cmd/crowdsec-cli/metrics/show.go index a3940fded16..3e2b812b30f 100644 --- a/cmd/crowdsec-cli/metrics/show.go +++ b/cmd/crowdsec-cli/metrics/show.go @@ -44,7 +44,7 @@ func (cli *cliMetrics) show(ctx context.Context, sections []string, url string, } if err := ms.Fetch(ctx, cfg.Cscli.PrometheusUrl, db); err != nil { - log.Warnf("unable to fetch metrics: %s", err) + log.Warn(err) } // any section that we don't have in the store is an error diff --git a/cmd/crowdsec-cli/metrics/statbouncer.go b/cmd/crowdsec-cli/metrics/statbouncer.go index 
b9b77bda471..60734900749 100644 --- a/cmd/crowdsec-cli/metrics/statbouncer.go +++ b/cmd/crowdsec-cli/metrics/statbouncer.go @@ -34,11 +34,22 @@ type statBouncer struct { aggregated map[string]map[string]map[string]int64 } +func (s *statBouncer) MarshalJSON() ([]byte, error) { + return json.Marshal(s.aggregated) +} + func (s *statBouncer) Description() (string, string) { return "Bouncer Metrics", `Network traffic blocked by bouncers.` } +func warnOnce(warningsLogged map[string]bool, msg string) { + if _, ok := warningsLogged[msg]; !ok { + log.Warningf(msg) + warningsLogged[msg] = true + } +} + func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { if db == nil { return nil @@ -57,7 +68,10 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { // keep track of oldest collection timestamp var since *time.Time - for i, met := range metrics { + // don't spam the user with the same warnings + warningsLogged := make(map[string]bool) + + for _, met := range metrics { collectedAt := met.CollectedAt if since == nil || collectedAt.Before(*since) { since = &collectedAt @@ -76,31 +90,36 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { log.Warningf("while parsing metrics: %s", err) } - fmt.Printf("row %d, %s, %+v\n", i, bouncerName, payload) - for _, m := range payload.Metrics { for _, item := range m.Items { labels := item.Labels // these are mandatory but we got pointers, so... 
- // XXX: but we should only print these once, even for repeated offenses + + valid := true if item.Name == nil { - log.Warningf("missing 'name' field in metrics reported by %s", bouncerName) - continue + warnOnce(warningsLogged, "missing 'name' field in metrics reported by "+bouncerName) + // no continue - keep checking the rest + valid = false } - name := *item.Name if item.Unit == nil { - log.Warningf("missing 'unit' field in metrics reported by %s", bouncerName) - continue + warnOnce(warningsLogged, "missing 'unit' field in metrics reported by "+bouncerName) + valid = false } - unit := *item.Unit if item.Value == nil { - log.Warningf("missing 'value' field in metrics reported by %s", bouncerName) + warnOnce(warningsLogged, "missing 'value' field in metrics reported by "+bouncerName) + valid = false + } + + if !valid { continue } + + name := *item.Name + unit := *item.Unit value := *item.Value rawMetric := bouncerMetricItem{ @@ -112,8 +131,6 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { value: value, } - fmt.Printf("raw: %v\n", rawMetric) - s.rawMetrics = append(s.rawMetrics, rawMetric) } } @@ -157,26 +174,36 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEm } // [bouncer][origin]; where origin=="" is the total + + fmt.Printf("ShowEmpty: %t\n", showEmpty) for bouncerName := range bouncerNames { t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("", "Bytes", "Packets") t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) - // XXX: noUnit, showEmpty // XXX: total of all origins // XXX: blocked_ips and other metrics - // XXX: -o json + + numRows := 0 + + // we print one table per bouncer only if it has stats, so "showEmpty" has no effect + // unless we want a global table for all bouncers for origin, metrics := range s.aggregated[bouncerName] { t.AddRow(origin, - strconv.FormatInt(metrics["dropped_bytes"], 10), + formatNumber(metrics["dropped_bytes"], noUnit), 
strconv.FormatInt(metrics["dropped_packets"], 10), ) + + numRows += 1 + } + + if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, fmt.Sprintf("\n%s (%s):", title, bouncerName)) + t.Render() } - title, _ := s.Description() - cstable.RenderTitle(out, fmt.Sprintf("\n%s (%s):", title, bouncerName)) - t.Render() } } diff --git a/cmd/crowdsec-cli/metrics/store.go b/cmd/crowdsec-cli/metrics/store.go index 9c4250b1d7e..dd71a133a12 100644 --- a/cmd/crowdsec-cli/metrics/store.go +++ b/cmd/crowdsec-cli/metrics/store.go @@ -71,7 +71,7 @@ func (ms metricStore) fetchPrometheusMetrics(url string) error { err := prom2json.FetchMetricFamilies(url, mfChan, transport) if err != nil { - errChan <- fmt.Errorf("failed to fetch metrics: %w", err) + errChan <- fmt.Errorf("while fetching metrics: %w", err) return } errChan <- nil diff --git a/cmd/crowdsec-cli/metrics/table.go b/cmd/crowdsec-cli/metrics/table.go index f51e905ba71..66874e69321 100644 --- a/cmd/crowdsec-cli/metrics/table.go +++ b/cmd/crowdsec-cli/metrics/table.go @@ -110,12 +110,7 @@ func metricsToTable(t *cstable.Table, stats map[string]map[string]int, keys []st for _, sl := range keys { if v, ok := astats[sl]; ok && v != 0 { - numberToShow := strconv.Itoa(v) - if !noUnit { - numberToShow = formatNumber(v) - } - - row = append(row, numberToShow) + row = append(row, formatNumber(int64(v), !noUnit)) } else { row = append(row, "-") } From 36e34a41473baf87fd89e62a3664deaa7eb53220 Mon Sep 17 00:00:00 2001 From: marco Date: Tue, 9 Jul 2024 12:46:43 +0200 Subject: [PATCH 065/119] todo --- cmd/crowdsec-cli/metrics/statbouncer.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/crowdsec-cli/metrics/statbouncer.go b/cmd/crowdsec-cli/metrics/statbouncer.go index 60734900749..f23d35601f7 100644 --- a/cmd/crowdsec-cli/metrics/statbouncer.go +++ b/cmd/crowdsec-cli/metrics/statbouncer.go @@ -149,6 +149,8 @@ func (s *statBouncer) aggregate() { if s.aggregated == nil { s.aggregated 
= make(map[string]map[string]map[string]int64) } + + // TODO: describe CAPI, total with all origins for _, raw := range s.rawMetrics { if _, ok := s.aggregated[raw.bouncerName]; !ok { @@ -175,8 +177,6 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEm // [bouncer][origin]; where origin=="" is the total - fmt.Printf("ShowEmpty: %t\n", showEmpty) - for bouncerName := range bouncerNames { t := cstable.New(out, wantColor) t.SetRowLines(false) From 865756062762c3a2a94867a415d5554a1f8a32f4 Mon Sep 17 00:00:00 2001 From: marco Date: Tue, 9 Jul 2024 14:44:42 +0200 Subject: [PATCH 066/119] cscli metrics: drop support for '-o raw' (just use json) --- cmd/crowdsec-cli/metrics/list.go | 14 +++++--------- cmd/crowdsec-cli/metrics/store.go | 13 +++---------- 2 files changed, 8 insertions(+), 19 deletions(-) diff --git a/cmd/crowdsec-cli/metrics/list.go b/cmd/crowdsec-cli/metrics/list.go index 6f61b77a8c0..c173be82378 100644 --- a/cmd/crowdsec-cli/metrics/list.go +++ b/cmd/crowdsec-cli/metrics/list.go @@ -6,7 +6,6 @@ import ( "github.com/fatih/color" "github.com/spf13/cobra" - "gopkg.in/yaml.v3" "github.com/jedib0t/go-pretty/v6/table" "github.com/jedib0t/go-pretty/v6/text" @@ -35,7 +34,9 @@ func (cli *cliMetrics) list() error { }) } - switch cli.cfg().Cscli.Output { + outputFormat := cli.cfg().Cscli.Output + + switch outputFormat { case "human": out := color.Output t := cstable.New(out, cli.cfg().Cscli.Color).Writer @@ -70,13 +71,8 @@ func (cli *cliMetrics) list() error { } fmt.Println(string(x)) - case "raw": - x, err := yaml.Marshal(allMetrics) - if err != nil { - return fmt.Errorf("failed to marshal metric types: %w", err) - } - - fmt.Println(string(x)) + default: + return fmt.Errorf("output format '%s' not supported for this command", outputFormat) } return nil diff --git a/cmd/crowdsec-cli/metrics/store.go b/cmd/crowdsec-cli/metrics/store.go index dd71a133a12..4f3b8ff9ffa 100644 --- a/cmd/crowdsec-cli/metrics/store.go +++ 
b/cmd/crowdsec-cli/metrics/store.go @@ -13,7 +13,6 @@ import ( dto "github.com/prometheus/client_model/go" "github.com/prometheus/prom2json" log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/maptools" "github.com/crowdsecurity/go-cs-lib/trace" @@ -237,7 +236,7 @@ func (ms metricStore) processPrometheusMetrics(result []*prom2json.Family) { } } -func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, formatType string, noUnit bool) error { +func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, outputFormat string, noUnit bool) error { // copy only the sections we want want := map[string]metricSection{} @@ -253,7 +252,7 @@ func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, want[section] = ms[section] } - switch formatType { + switch outputFormat { case "human": for _, section := range maptools.SortedKeys(want) { want[section].Table(out, wantColor, noUnit, showEmpty) @@ -264,14 +263,8 @@ func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, return fmt.Errorf("failed to marshal metrics: %w", err) } out.Write(x) - case "raw": - x, err := yaml.Marshal(want) - if err != nil { - return fmt.Errorf("failed to marshal metrics: %w", err) - } - out.Write(x) default: - return fmt.Errorf("unknown format type %s", formatType) + return fmt.Errorf("output format '%s' not supported for this command", outputFormat) } return nil From df5ba9f62ec1f493b2697f274b035bf4594a7fb2 Mon Sep 17 00:00:00 2001 From: marco Date: Tue, 9 Jul 2024 17:33:11 +0200 Subject: [PATCH 067/119] wip bytes/packets --- cmd/crowdsec-cli/metrics/statbouncer.go | 55 ++++++++++++++++++------- 1 file changed, 39 insertions(+), 16 deletions(-) diff --git a/cmd/crowdsec-cli/metrics/statbouncer.go b/cmd/crowdsec-cli/metrics/statbouncer.go index f23d35601f7..f02c325214c 100644 --- a/cmd/crowdsec-cli/metrics/statbouncer.go +++ b/cmd/crowdsec-cli/metrics/statbouncer.go @@ -5,9 
+5,10 @@ import ( "encoding/json" "fmt" "io" - "strconv" + "strings" "time" + "github.com/jedib0t/go-pretty/v6/table" "github.com/jedib0t/go-pretty/v6/text" log "github.com/sirupsen/logrus" @@ -31,7 +32,7 @@ type statBouncer struct { // we keep de-normalized metrics so we can iterate // over them multiple times and keep the aggregation code simple rawMetrics []bouncerMetricItem - aggregated map[string]map[string]map[string]int64 + aggregated map[string]map[string]map[string]map[string]int64 } func (s *statBouncer) MarshalJSON() ([]byte, error) { @@ -122,6 +123,14 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { unit := *item.Unit value := *item.Value + if unit == "byte" { + name = strings.TrimSuffix(name, "_bytes") + } + + if unit == "packet" { + name = strings.TrimSuffix(name, "_packets") + } + rawMetric := bouncerMetricItem{ bouncerName: bouncerName, ipType: labels["ip_type"], @@ -142,30 +151,34 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { } func (s *statBouncer) aggregate() { - // [bouncer][origin][name]value + // [bouncer][origin][name][unit]value // XXX: how about blocked ips? 
if s.aggregated == nil { - s.aggregated = make(map[string]map[string]map[string]int64) + s.aggregated = make(map[string]map[string]map[string]map[string]int64) } // TODO: describe CAPI, total with all origins for _, raw := range s.rawMetrics { if _, ok := s.aggregated[raw.bouncerName]; !ok { - s.aggregated[raw.bouncerName] = make(map[string]map[string]int64) + s.aggregated[raw.bouncerName] = make(map[string]map[string]map[string]int64) } if _, ok := s.aggregated[raw.bouncerName][raw.origin]; !ok { - s.aggregated[raw.bouncerName][raw.origin] = make(map[string]int64) + s.aggregated[raw.bouncerName][raw.origin] = make(map[string]map[string]int64) } if _, ok := s.aggregated[raw.bouncerName][raw.origin][raw.name]; !ok { - s.aggregated[raw.bouncerName][raw.origin][raw.name] = 0 + s.aggregated[raw.bouncerName][raw.origin][raw.name] = make(map[string]int64) + } + + if _, ok := s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit]; !ok { + s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] = 0 } - s.aggregated[raw.bouncerName][raw.origin][raw.name] += int64(raw.value) + s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] += int64(raw.value) } } @@ -178,10 +191,16 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEm // [bouncer][origin]; where origin=="" is the total for bouncerName := range bouncerNames { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("", "Bytes", "Packets") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"", "Bytes", "Bytes", "Packets", "Packets"}, table.RowConfig{AutoMerge: true}) + t.AppendHeader(table.Row{"", "processed", "dropped", "processed", "dropped"}) + t.SetColumnConfigs([]table.ColumnConfig{ + {Number:1, Align: text.AlignLeft}, + {Number:2, Align: text.AlignRight}, + {Number:3, Align: text.AlignRight}, + {Number:4, Align: text.AlignRight}, + {Number:5, Align: text.AlignRight}, 
+ }) // XXX: total of all origins // XXX: blocked_ips and other metrics @@ -191,9 +210,13 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEm // unless we want a global table for all bouncers for origin, metrics := range s.aggregated[bouncerName] { - t.AddRow(origin, - formatNumber(metrics["dropped_bytes"], noUnit), - strconv.FormatInt(metrics["dropped_packets"], 10), + t.AppendRow( + table.Row{origin, + formatNumber(metrics["processed"]["byte"], !noUnit), + formatNumber(metrics["dropped"]["byte"], !noUnit), + formatNumber(metrics["processed"]["packet"], !noUnit), + formatNumber(metrics["dropped"]["packet"], !noUnit), + }, ) numRows += 1 @@ -202,7 +225,7 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEm if numRows > 0 || showEmpty { title, _ := s.Description() cstable.RenderTitle(out, fmt.Sprintf("\n%s (%s):", title, bouncerName)) - t.Render() + fmt.Fprintln(out, t.Render()) } } From 7665c7a49e32dfea91ede73530990e78d9429e38 Mon Sep 17 00:00:00 2001 From: marco Date: Wed, 10 Jul 2024 10:44:19 +0200 Subject: [PATCH 068/119] update func tests --- test/bats/08_metrics.bats | 18 ++------ test/bats/08_metrics_bouncer.bats | 46 +++++++++++++++++++ ..._metrics.bats => 08_metrics_machines.bats} | 17 +++---- test/lib/setup_file.sh | 15 ++++-- 4 files changed, 66 insertions(+), 30 deletions(-) create mode 100644 test/bats/08_metrics_bouncer.bats rename test/bats/{30_machines_metrics.bats => 08_metrics_machines.bats} (80%) diff --git a/test/bats/08_metrics.bats b/test/bats/08_metrics.bats index 8bf30812cff..e260e667524 100644 --- a/test/bats/08_metrics.bats +++ b/test/bats/08_metrics.bats @@ -23,9 +23,9 @@ teardown() { #---------- @test "cscli metrics (crowdsec not running)" { - rune -1 cscli metrics - # crowdsec is down - assert_stderr --partial 'failed to fetch metrics: executing GET request for URL \"http://127.0.0.1:6060/metrics\" failed: Get \"http://127.0.0.1:6060/metrics\": dial tcp 127.0.0.1:6060: 
connect: connection refused' + rune -0 cscli metrics + # crowdsec is down, we won't get an error because some metrics come from the db instead + assert_stderr --partial 'while fetching metrics: executing GET request for URL \"http://127.0.0.1:6060/metrics\" failed: Get \"http://127.0.0.1:6060/metrics\": dial tcp 127.0.0.1:6060: connect: connection refused' } @test "cscli metrics (bad configuration)" { @@ -72,10 +72,6 @@ teardown() { rune -0 jq 'keys' <(output) assert_output --partial '"alerts",' assert_output --partial '"parsers",' - - rune -0 cscli metrics -o raw - assert_output --partial 'alerts: {}' - assert_output --partial 'parsers: {}' } @test "cscli metrics list" { @@ -85,10 +81,6 @@ teardown() { rune -0 cscli metrics list -o json rune -0 jq -c '.[] | [.type,.title]' <(output) assert_line '["acquisition","Acquisition Metrics"]' - - rune -0 cscli metrics list -o raw - assert_line "- type: acquisition" - assert_line " title: Acquisition Metrics" } @test "cscli metrics show" { @@ -108,8 +100,4 @@ teardown() { rune -0 cscli metrics show lapi -o json rune -0 jq -c '.lapi."/v1/watchers/login" | keys' <(output) assert_json '["POST"]' - - rune -0 cscli metrics show lapi -o raw - assert_line 'lapi:' - assert_line ' /v1/watchers/login:' } diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats new file mode 100644 index 00000000000..e061f001232 --- /dev/null +++ b/test/bats/08_metrics_bouncer.bats @@ -0,0 +1,46 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + ./instance-data load + ./instance-crowdsec start +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "cscli metrics show bouncers" { + # there are no bouncers, so no metrics yet + rune -0 cscli metrics show bouncers + refute_output +} + +@test "rc usage metrics (empty payload)" { + # a 
registered bouncer can send metrics for the lapi and console + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + + payload=$(cat <<-EOT + remediation_components: [] + log_processors: [] + EOT + ) + + rune -22 curl-with-key '/v1/usage-metrics' --data "$(echo "$payload" | yq -o j)" -X POST + assert_stderr 'curl: (22) The requested URL returned error: 404' + refute_output +} diff --git a/test/bats/30_machines_metrics.bats b/test/bats/08_metrics_machines.bats similarity index 80% rename from test/bats/30_machines_metrics.bats rename to test/bats/08_metrics_machines.bats index e29e2175037..63bc8b8ec6c 100644 --- a/test/bats/30_machines_metrics.bats +++ b/test/bats/08_metrics_machines.bats @@ -23,8 +23,8 @@ teardown() { #---------- -@test "usage metrics (empty payload)" { - # a registered log processor can send metrics for the console +@test "lp usage metrics (empty payload)" { + # a registered log processor can send metrics for the lapi and console token=$(lp_login) usage_metrics="http://localhost:8080/v1/usage-metrics" @@ -34,16 +34,12 @@ teardown() { EOT ) - rune -22 curl -f -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" - refute_output - assert_stderr 'curl: (22) The requested URL returned error: 400' - - rune -0 curl -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" + rune -22 curl -sS --fail-with-body -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" + assert_stderr --partial 'error: 400' assert_json '{message: "Missing log processor data"}' - refute_stderr } -@test "usage metrics (bad payload)" { +@test "lp usage metrics (bad payload)" { token=$(lp_login) usage_metrics="http://localhost:8080/v1/usage-metrics" @@ -65,10 +61,9 @@ teardown() { log_processors.0.datasources in body is required log_processors.0.hub_items in body is required EOT - } -@test "usage metrics (full payload)" { +@test 
"lp usage metrics (full payload)" { token=$(lp_login) usage_metrics="http://localhost:8080/v1/usage-metrics" diff --git a/test/lib/setup_file.sh b/test/lib/setup_file.sh index bd322767445..46c43ddfc33 100755 --- a/test/lib/setup_file.sh +++ b/test/lib/setup_file.sh @@ -299,15 +299,22 @@ lp_login() { export -f lp_login # call the lapi through unix socket with an API_KEY (authenticates as a bouncer) +# after $1, pass throught extra arguments to curl lapi-get() { - [[ -z "$1" ]] && { fail "lapi-get: missing path"; } - [[ -z "$API_KEY" ]] && { fail "lapi-get: missing API_KEY"; } + [[ -z "$1" ]] && { fail "${FUNCNAME[0]}: missing path"; } + shift + [[ -z "$API_KEY" ]] && { fail "${FUNCNAME[0]}: missing API_KEY"; } local socket socket=$(config_get '.api.server.listen_socket') - [[ -z "$socket" ]] && { fail "lapi-get: missing .api.server.listen_socket"; } + [[ -z "$socket" ]] && { fail "${FUNCNAME[0]}: missing .api.server.listen_socket"; } # curl needs a fake hostname when using a unix socket - curl -s -f -H "X-Api-Key: $API_KEY" --unix-socket "$socket" "http://lapi$1" + curl -sS --fail-with-body -H "X-Api-Key: $API_KEY" --unix-socket "$socket" "http://lapi$1" "$@" } export -f lapi-get +# alias to change naming later +curl-with-key() { + lapi-get "$@" +} +export -f curl-with-key From 6670b0886107e3f795c8191db664bb07457bd6ff Mon Sep 17 00:00:00 2001 From: marco Date: Wed, 10 Jul 2024 11:07:25 +0200 Subject: [PATCH 069/119] fup --- test/bats/08_metrics_bouncer.bats | 4 ++-- test/lib/setup_file.sh | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index e061f001232..cd6286a27cc 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -41,6 +41,6 @@ teardown() { ) rune -22 curl-with-key '/v1/usage-metrics' --data "$(echo "$payload" | yq -o j)" -X POST - assert_stderr 'curl: (22) The requested URL returned error: 404' - refute_output + assert_stderr 
'curl: (22) The requested URL returned error: 400' + assert_json '{message: "Missing remediation component data"}' } diff --git a/test/lib/setup_file.sh b/test/lib/setup_file.sh index 46c43ddfc33..b70a0498579 100755 --- a/test/lib/setup_file.sh +++ b/test/lib/setup_file.sh @@ -298,10 +298,9 @@ lp_login() { } export -f lp_login -# call the lapi through unix socket with an API_KEY (authenticates as a bouncer) -# after $1, pass throught extra arguments to curl lapi-get() { [[ -z "$1" ]] && { fail "${FUNCNAME[0]}: missing path"; } + local path=$1 shift [[ -z "$API_KEY" ]] && { fail "${FUNCNAME[0]}: missing API_KEY"; } local socket @@ -309,11 +308,12 @@ lapi-get() { [[ -z "$socket" ]] && { fail "${FUNCNAME[0]}: missing .api.server.listen_socket"; } # curl needs a fake hostname when using a unix socket - curl -sS --fail-with-body -H "X-Api-Key: $API_KEY" --unix-socket "$socket" "http://lapi$1" "$@" + curl -sS --fail-with-body -H "X-Api-Key: $API_KEY" --unix-socket "$socket" "http://lapi$path" "$@" } export -f lapi-get -# alias to change naming later +# call the lapi through unix socket with an API_KEY (authenticates as a bouncer) +# after $1, pass throught extra arguments to curl curl-with-key() { lapi-get "$@" } From 1cac76ae87e97d15bb13fe36eaab939761f783b3 Mon Sep 17 00:00:00 2001 From: marco Date: Wed, 10 Jul 2024 13:35:50 +0200 Subject: [PATCH 070/119] more readable tests --- test/bats/08_metrics_bouncer.bats | 26 ++++++++++-- test/bats/08_metrics_machines.bats | 30 ++++++------- test/lib/setup_file.sh | 67 +++++++++++++++++++++--------- 3 files changed, 85 insertions(+), 38 deletions(-) diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index cd6286a27cc..5c89423ce3d 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -34,13 +34,33 @@ teardown() { API_KEY=$(cscli bouncers add testbouncer -o raw) export API_KEY - payload=$(cat <<-EOT + payload=$(yq -o j <<-EOT remediation_components: [] 
log_processors: [] EOT ) - rune -22 curl-with-key '/v1/usage-metrics' --data "$(echo "$payload" | yq -o j)" -X POST - assert_stderr 'curl: (22) The requested URL returned error: 400' + rune -22 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + assert_stderr --partial 'error: 400' assert_json '{message: "Missing remediation component data"}' } + +@test "rc usage metrics (bad payload)" { + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + + payload=$(yq -o j <<-EOT + remediation_components: + - version: "v1.0" + log_processors: [] + EOT + ) + + rune -22 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + assert_stderr --partial "error: 422" + rune -0 jq -r '.message' <(output) + assert_output - <<-EOT + validation failure list: + remediation_components.0.utc_startup_timestamp in body is required + EOT +} diff --git a/test/bats/08_metrics_machines.bats b/test/bats/08_metrics_machines.bats index 63bc8b8ec6c..3b73839e753 100644 --- a/test/bats/08_metrics_machines.bats +++ b/test/bats/08_metrics_machines.bats @@ -25,35 +25,33 @@ teardown() { @test "lp usage metrics (empty payload)" { # a registered log processor can send metrics for the lapi and console - token=$(lp_login) - usage_metrics="http://localhost:8080/v1/usage-metrics" + TOKEN=$(lp-get-token) + export TOKEN - payload=$(cat <<-EOT + payload=$(yq -o j <<-EOT remediation_components: [] log_processors: [] EOT ) - rune -22 curl -sS --fail-with-body -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" + rune -22 curl-with-token '/v1/usage-metrics' -X POST --data "$payload" assert_stderr --partial 'error: 400' assert_json '{message: "Missing log processor data"}' } @test "lp usage metrics (bad payload)" { - token=$(lp_login) - usage_metrics="http://localhost:8080/v1/usage-metrics" + TOKEN=$(lp-get-token) + export TOKEN - payload=$(cat <<-EOT + payload=$(yq -o j <<-EOT remediation_components: [] log_processors: - version: "v1.0" EOT ) 
- rune -22 curl -f -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" - assert_stderr "curl: (22) The requested URL returned error: 422" - - rune -0 curl -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" + rune -22 curl-with-token '/v1/usage-metrics' -X POST --data "$payload" + assert_stderr --partial "error: 422" rune -0 jq -r '.message' <(output) assert_output - <<-EOT validation failure list: @@ -64,10 +62,12 @@ teardown() { } @test "lp usage metrics (full payload)" { - token=$(lp_login) - usage_metrics="http://localhost:8080/v1/usage-metrics" + TOKEN=$(lp-get-token) + export TOKEN + + # base payload without any measurement - payload=$(cat <<-EOT + payload=$(yq -o j <<-EOT remediation_components: [] log_processors: - version: "v1.0" @@ -95,6 +95,6 @@ teardown() { EOT ) - rune -0 curl -sS -H "Authorization: Bearer ${token}" -X POST "$usage_metrics" --data "$(echo "$payload" | yq -o j)" + rune -0 curl-with-token '/v1/usage-metrics' -X POST --data "$payload" refute_output } diff --git a/test/lib/setup_file.sh b/test/lib/setup_file.sh index 12eff1acd57..7cbced01ef1 100755 --- a/test/lib/setup_file.sh +++ b/test/lib/setup_file.sh @@ -282,34 +282,61 @@ rune() { } export -f rune -# as a log processor, connect to lapi and get a token -lp_login() { +# call the lapi through unix socket +# the path (and query string) must be the first parameter, the others will be passed to curl +curl-socket() { + [[ -z "$1" ]] && { fail "${FUNCNAME[0]}: missing path"; } + local path=$1 + shift + local socket + socket=$(config_get '.api.server.listen_socket') + [[ -z "$socket" ]] && { fail "${FUNCNAME[0]}: missing .api.server.listen_socket"; } + # curl needs a fake hostname when using a unix socket + curl --unix-socket "$socket" "http://lapi$path" "$@" +} +export -f curl-socket + +# call the lapi through tcp +# the path (and query string) must be the first parameter, the others will be 
passed to curl +curl-tcp() { + [[ -z "$1" ]] && { fail "${FUNCNAME[0]}: missing path"; } + local path=$1 + shift local cred cred=$(config_get .api.client.credentials_path) - local url - url="$(yq '.url' < "$cred")/v1/watchers/login" - local resp - resp=$(yq -oj -I0 '{"machine_id":.login,"password":.password}' < "$cred" | curl -s -X POST "$url" --data-binary @-) - if [[ "$(yq -e '.code' <<<"$resp")" != 200 ]]; then - echo "login_lp: failed to login" >&3 - return 1 - fi - echo "$resp" | yq -r '.token' + local base_url + base_url="$(yq '.url' < "$cred")" + curl "$base_url$path" "$@" } -export -f lp_login +export -f curl-tcp # call the lapi through unix socket with an API_KEY (authenticates as a bouncer) # after $1, pass throught extra arguments to curl curl-with-key() { - [[ -z "$1" ]] && { fail "${FUNCNAME[0]}: missing path"; } - local path=$1 - shift [[ -z "$API_KEY" ]] && { fail "${FUNCNAME[0]}: missing API_KEY"; } - local socket - socket=$(config_get '.api.server.listen_socket') - [[ -z "$socket" ]] && { fail "${FUNCNAME[0]}: missing .api.server.listen_socket"; } + curl-tcp "$@" -sS --fail-with-body -H "X-Api-Key: $API_KEY" +} +export -f curl-with-key +# call the lapi through unix socket with a TOKEN (authenticates as a machine) +# after $1, pass throught extra arguments to curl +curl-with-token() { + [[ -z "$TOKEN" ]] && { fail "${FUNCNAME[0]}: missing TOKEN"; } # curl needs a fake hostname when using a unix socket - curl -sS --fail-with-body -H "X-Api-Key: $API_KEY" --unix-socket "$socket" "http://lapi$path" "$@" + curl-tcp "$@" -sS --fail-with-body -H "Authorization: Bearer $TOKEN" } -export -f curl-with-key +export -f curl-with-token + +# as a log processor, connect to lapi and get a token +lp-get-token() { + local cred + cred=$(config_get .api.client.credentials_path) + local resp + resp=$(yq -oj -I0 '{"machine_id":.login,"password":.password}' < "$cred" | curl-socket '/v1/watchers/login' -s -X POST --data-binary @-) + if [[ "$(yq -e '.code' <<<"$resp")" != 
200 ]]; then + echo "login_lp: failed to login" >&3 + return 1 + fi + echo "$resp" | yq -r '.token' +} +export -f lp-get-token From 6df2462c422775407a1df7b59891773ea57c1db3 Mon Sep 17 00:00:00 2001 From: marco Date: Wed, 10 Jul 2024 14:46:49 +0200 Subject: [PATCH 071/119] more bouncer tests --- test/bats/08_metrics_bouncer.bats | 66 +++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 5c89423ce3d..478fa207ae0 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -63,4 +63,70 @@ teardown() { validation failure list: remediation_components.0.utc_startup_timestamp in body is required EOT + + # validation, like timestamp format + + payload=$(yq -o j '.remediation_components[0].utc_startup_timestamp = "2021-09-01T00:00:00Z"' <<<"$payload") + rune -22 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + assert_stderr --partial "error: 400" + assert_json '{message: "json: cannot unmarshal string into Go struct field AllMetrics.remediation_components of type int64"}' + + payload=$(yq -o j '.remediation_components[0].utc_startup_timestamp = 1707399316' <<<"$payload") + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + refute_output +} + +@test "rc usage metrics (good payload)" { + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + + payload=$(yq -o j <<-EOT + remediation_components: + - version: "v1.0" + utc_startup_timestamp: 1707399316 + log_processors: [] + EOT + ) + + # bouncers have feature flags too + + payload=$(yq -o j ' + .remediation_components[0].feature_flags = ["huey", "dewey", "louie"] | + .remediation_components[0].os = {"name": "Multics", "version": "MR12.5"} + ' <<<"$payload") + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + rune -0 cscli bouncer inspect testbouncer -o json + rune -0 yq -o j '[.os,.featureflags]' <(output) + assert_json 
'["Multics/MR12.5",["huey","dewey","louie"]]' + + payload=$(yq -o j ' + .remediation_components[0].metrics = [ + { + "meta": {"utc_now_timestamp": 1707399316, "window_size_seconds":600}, + "items":[ + {"name": "foo", "unit": "pound", "value": 3.1415926}, + {"name": "foo", "unit": "pound", "value": 2.7182818}, + {"name": "foo", "unit": "dogyear", "value": 2.7182818} + ] + } + ] + ' <<<"$payload") + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + rune -0 cscli metrics show bouncers -o json + # aggregation is ok -- we are truncating, not rounding, because the float is mandated by swagger. + # but without labels the origin string is empty + assert_json '{bouncers:{testbouncer:{"": {"foo": {"dogyear": 2, "pound": 5}}}}}' + + # TODO: adjust the table output + rune -0 cscli metrics show bouncers + assert_output - <<-EOT + + Bouncer Metrics (testbouncer): + +--+---------------------+---------------------+ + | | Bytes | Packets | + | | processed | dropped | processed | dropped | + +--+-----------+---------+-----------+---------+ + | | NaN | NaN | NaN | NaN | + +--+-----------+---------+-----------+---------+ + EOT } From 4577568b1a16e86d9e51ba64e03237b128c19606 Mon Sep 17 00:00:00 2001 From: marco Date: Wed, 10 Jul 2024 17:01:39 +0200 Subject: [PATCH 072/119] test data --- cmd/crowdsec-cli/metrics/statbouncer.go | 6 +- test/bats/08_metrics_bouncer.bats | 100 ++++++++++++++++++++++-- 2 files changed, 98 insertions(+), 8 deletions(-) diff --git a/cmd/crowdsec-cli/metrics/statbouncer.go b/cmd/crowdsec-cli/metrics/statbouncer.go index f02c325214c..3d34af0824a 100644 --- a/cmd/crowdsec-cli/metrics/statbouncer.go +++ b/cmd/crowdsec-cli/metrics/statbouncer.go @@ -192,7 +192,7 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEm for bouncerName := range bouncerNames { t := cstable.New(out, wantColor).Writer - t.AppendHeader(table.Row{"", "Bytes", "Bytes", "Packets", "Packets"}, table.RowConfig{AutoMerge: true}) + 
t.AppendHeader(table.Row{"Origin", "Bytes", "Bytes", "Packets", "Packets"}, table.RowConfig{AutoMerge: true}) t.AppendHeader(table.Row{"", "processed", "dropped", "processed", "dropped"}) t.SetColumnConfigs([]table.ColumnConfig{ {Number:1, Align: text.AlignLeft}, @@ -224,7 +224,9 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEm if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, fmt.Sprintf("\n%s (%s):", title, bouncerName)) + // don't use SetTitle() because it draws the title inside table box + // TODO: newline position wrt other stat tables + cstable.RenderTitle(out, fmt.Sprintf("%s (%s):", title, bouncerName)) fmt.Fprintln(out, t.Render()) } } diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 478fa207ae0..2e1da3b0538 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -120,13 +120,101 @@ teardown() { # TODO: adjust the table output rune -0 cscli metrics show bouncers assert_output - <<-EOT + Bouncer Metrics (testbouncer): + +--------+---------------------+---------------------+ + | Origin | Bytes | Packets | + | | processed | dropped | processed | dropped | + +--------+-----------+---------+-----------+---------+ + | | NaN | NaN | NaN | NaN | + +--------+-----------+---------+-----------+---------+ + EOT + + # some more realistic values, at least for the labels + # we don't use the same now_timestamp or the payload will be silently discarded + + payload=$(yq -o j ' + .remediation_components[0].metrics = [ + { + "meta": {"utc_now_timestamp": 1707399916, "window_size_seconds":600}, + "items":[ + {"name": "active_decisions", "unit": "ip", "value": 51936, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_voipbl"}}, + {"name": "active_decisions", "unit": "ip", "value": 1, "labels": {"ip_type": "ipv6", "origin": "cscli"}}, + {"name": "dropped", "unit": "byte", "value": 3800, "labels": {"ip_type": "ipv4", "origin": 
"CAPI"}}, + {"name": "dropped", "unit": "byte", "value": 0, "labels": {"ip_type": "ipv4", "origin": "cscli"}}, + {"name": "dropped", "unit": "byte", "value": 1034, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_cruzit_web_attacks"}}, + {"name": "dropped", "unit": "byte", "value": 3847, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_voipbl"}}, + {"name": "dropped", "unit": "byte", "value": 380, "labels": {"ip_type": "ipv6", "origin": "cscli"}}, + {"name": "dropped", "unit": "packet", "value": 100, "labels": {"ip_type": "ipv4", "origin": "CAPI"}}, + {"name": "dropped", "unit": "packet", "value": 10, "labels": {"ip_type": "ipv4", "origin": "cscli"}}, + {"name": "dropped", "unit": "packet", "value": 23, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_cruzit_web_attacks"}}, + {"name": "dropped", "unit": "packet", "value": 58, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_voipbl"}}, + {"name": "dropped", "unit": "packet", "value": 0, "labels": {"ip_type": "ipv6", "origin": "cscli"}} + ] + } + ] | + .remediation_components[0].type = "crowdsec-firewall-bouncer" + ' <<<"$payload") + + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + rune -0 cscli metrics show bouncers -o json + assert_json '{ + "bouncers": { + "testbouncer": { + "": { + "foo": { + "dogyear": 2, + "pound": 5 + } + }, + "CAPI": { + "dropped": { + "byte": 3800, + "packet": 100 + } + }, + "cscli": { + "active_decisions": { + "ip": 1 + }, + "dropped": { + "byte": 380, + "packet": 10 + } + }, + "lists:firehol_cruzit_web_attacks": { + "dropped": { + "byte": 1034, + "packet": 23 + } + }, + "lists:firehol_voipbl": { + "active_decisions": { + "ip": 51936 + }, + "dropped": { + "byte": 3847, + "packet": 58 + } + } + } + } + }' + rune -0 cscli metrics show bouncers + assert_output - <<-EOT Bouncer Metrics (testbouncer): - +--+---------------------+---------------------+ - | | Bytes | Packets | - | | processed | dropped | processed | dropped | - 
+--+-----------+---------+-----------+---------+ - | | NaN | NaN | NaN | NaN | - +--+-----------+---------+-----------+---------+ + +----------------------------------+---------------------+---------------------+ + | Origin | Bytes | Packets | + | | processed | dropped | processed | dropped | + +----------------------------------+-----------+---------+-----------+---------+ + | | NaN | NaN | NaN | NaN | + | lists:firehol_voipbl | NaN | 3.85k | NaN | 58 | + | cscli | NaN | 380 | NaN | 10 | + | CAPI | NaN | 3.80k | NaN | 100 | + | lists:firehol_cruzit_web_attacks | NaN | 1.03k | NaN | 23 | + +----------------------------------+-----------+---------+-----------+---------+ EOT + + # TODO: total of all origin, multiple item lists, active decisions discard older values + } From 70f9c65149dd6a5bf07e2256aab3c47c8586daf1 Mon Sep 17 00:00:00 2001 From: marco Date: Wed, 10 Jul 2024 17:16:55 +0200 Subject: [PATCH 073/119] stable row order --- cmd/crowdsec-cli/metrics/statbouncer.go | 8 ++++++-- test/bats/08_metrics_bouncer.bats | 4 ++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/cmd/crowdsec-cli/metrics/statbouncer.go b/cmd/crowdsec-cli/metrics/statbouncer.go index 3d34af0824a..ef4a97a7cc5 100644 --- a/cmd/crowdsec-cli/metrics/statbouncer.go +++ b/cmd/crowdsec-cli/metrics/statbouncer.go @@ -12,6 +12,8 @@ import ( "github.com/jedib0t/go-pretty/v6/text" log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/go-cs-lib/maptools" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" @@ -190,7 +192,7 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEm // [bouncer][origin]; where origin=="" is the total - for bouncerName := range bouncerNames { + for _, bouncerName := range maptools.SortedKeys(bouncerNames) { t := cstable.New(out, wantColor).Writer t.AppendHeader(table.Row{"Origin", "Bytes", "Bytes", "Packets", 
"Packets"}, table.RowConfig{AutoMerge: true}) t.AppendHeader(table.Row{"", "processed", "dropped", "processed", "dropped"}) @@ -209,7 +211,9 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEm // we print one table per bouncer only if it has stats, so "showEmpty" has no effect // unless we want a global table for all bouncers - for origin, metrics := range s.aggregated[bouncerName] { + // sort origins for stable output + for _, origin := range maptools.SortedKeys(s.aggregated[bouncerName]) { + metrics := s.aggregated[bouncerName][origin] t.AppendRow( table.Row{origin, formatNumber(metrics["processed"]["byte"], !noUnit), diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 2e1da3b0538..407db7e8396 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -208,10 +208,10 @@ teardown() { | | processed | dropped | processed | dropped | +----------------------------------+-----------+---------+-----------+---------+ | | NaN | NaN | NaN | NaN | - | lists:firehol_voipbl | NaN | 3.85k | NaN | 58 | - | cscli | NaN | 380 | NaN | 10 | | CAPI | NaN | 3.80k | NaN | 100 | + | cscli | NaN | 380 | NaN | 10 | | lists:firehol_cruzit_web_attacks | NaN | 1.03k | NaN | 23 | + | lists:firehol_voipbl | NaN | 3.85k | NaN | 58 | +----------------------------------+-----------+---------+-----------+---------+ EOT From f525003c84a9bd96089eeb3d701646b893723f9a Mon Sep 17 00:00:00 2001 From: marco Date: Thu, 11 Jul 2024 09:55:17 +0200 Subject: [PATCH 074/119] rename package to avoid shadowing --- cmd/crowdsec-cli/{metrics => climetrics}/list.go | 2 +- cmd/crowdsec-cli/{metrics => climetrics}/metrics.go | 4 ++-- cmd/crowdsec-cli/{metrics => climetrics}/number.go | 2 +- cmd/crowdsec-cli/{metrics => climetrics}/show.go | 2 +- cmd/crowdsec-cli/{metrics => climetrics}/statacquis.go | 2 +- cmd/crowdsec-cli/{metrics => climetrics}/statalert.go | 2 +- cmd/crowdsec-cli/{metrics => 
climetrics}/statappsecengine.go | 2 +- cmd/crowdsec-cli/{metrics => climetrics}/statappsecrule.go | 2 +- cmd/crowdsec-cli/{metrics => climetrics}/statbouncer.go | 2 +- cmd/crowdsec-cli/{metrics => climetrics}/statbucket.go | 2 +- cmd/crowdsec-cli/{metrics => climetrics}/statdecision.go | 2 +- cmd/crowdsec-cli/{metrics => climetrics}/statlapi.go | 2 +- cmd/crowdsec-cli/{metrics => climetrics}/statlapibouncer.go | 2 +- cmd/crowdsec-cli/{metrics => climetrics}/statlapidecision.go | 2 +- cmd/crowdsec-cli/{metrics => climetrics}/statlapimachine.go | 2 +- cmd/crowdsec-cli/{metrics => climetrics}/statparser.go | 2 +- cmd/crowdsec-cli/{metrics => climetrics}/statstash.go | 2 +- cmd/crowdsec-cli/{metrics => climetrics}/statwhitelist.go | 2 +- cmd/crowdsec-cli/{metrics => climetrics}/store.go | 2 +- cmd/crowdsec-cli/{metrics => climetrics}/table.go | 2 +- cmd/crowdsec-cli/main.go | 4 ++-- cmd/crowdsec-cli/support.go | 4 ++-- 22 files changed, 25 insertions(+), 25 deletions(-) rename cmd/crowdsec-cli/{metrics => climetrics}/list.go (99%) rename cmd/crowdsec-cli/{metrics => climetrics}/metrics.go (95%) rename cmd/crowdsec-cli/{metrics => climetrics}/number.go (97%) rename cmd/crowdsec-cli/{metrics => climetrics}/show.go (99%) rename cmd/crowdsec-cli/{metrics => climetrics}/statacquis.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statalert.go (97%) rename cmd/crowdsec-cli/{metrics => climetrics}/statappsecengine.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statappsecrule.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statbouncer.go (99%) rename cmd/crowdsec-cli/{metrics => climetrics}/statbucket.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statdecision.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statlapi.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statlapibouncer.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statlapidecision.go (98%) rename cmd/crowdsec-cli/{metrics => 
climetrics}/statlapimachine.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statparser.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statstash.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statwhitelist.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/store.go (99%) rename cmd/crowdsec-cli/{metrics => climetrics}/table.go (99%) diff --git a/cmd/crowdsec-cli/metrics/list.go b/cmd/crowdsec-cli/climetrics/list.go similarity index 99% rename from cmd/crowdsec-cli/metrics/list.go rename to cmd/crowdsec-cli/climetrics/list.go index c173be82378..074f0603555 100644 --- a/cmd/crowdsec-cli/metrics/list.go +++ b/cmd/crowdsec-cli/climetrics/list.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "encoding/json" diff --git a/cmd/crowdsec-cli/metrics/metrics.go b/cmd/crowdsec-cli/climetrics/metrics.go similarity index 95% rename from cmd/crowdsec-cli/metrics/metrics.go rename to cmd/crowdsec-cli/climetrics/metrics.go index 082fce20ee2..f3bc4874460 100644 --- a/cmd/crowdsec-cli/metrics/metrics.go +++ b/cmd/crowdsec-cli/climetrics/metrics.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "github.com/spf13/cobra" @@ -12,7 +12,7 @@ type cliMetrics struct { cfg configGetter } -func NewCLI(cfg configGetter) *cliMetrics { +func New(cfg configGetter) *cliMetrics { return &cliMetrics{ cfg: cfg, } diff --git a/cmd/crowdsec-cli/metrics/number.go b/cmd/crowdsec-cli/climetrics/number.go similarity index 97% rename from cmd/crowdsec-cli/metrics/number.go rename to cmd/crowdsec-cli/climetrics/number.go index 2efa637ae85..e8144a7f0c9 100644 --- a/cmd/crowdsec-cli/metrics/number.go +++ b/cmd/crowdsec-cli/climetrics/number.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "fmt" diff --git a/cmd/crowdsec-cli/metrics/show.go b/cmd/crowdsec-cli/climetrics/show.go similarity index 99% rename from cmd/crowdsec-cli/metrics/show.go rename to cmd/crowdsec-cli/climetrics/show.go index 3e2b812b30f..b567a911107 100644 --- 
a/cmd/crowdsec-cli/metrics/show.go +++ b/cmd/crowdsec-cli/climetrics/show.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "context" diff --git a/cmd/crowdsec-cli/metrics/statacquis.go b/cmd/crowdsec-cli/climetrics/statacquis.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statacquis.go rename to cmd/crowdsec-cli/climetrics/statacquis.go index c004f64f17a..a18b8a2e2a2 100644 --- a/cmd/crowdsec-cli/metrics/statacquis.go +++ b/cmd/crowdsec-cli/climetrics/statacquis.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statalert.go b/cmd/crowdsec-cli/climetrics/statalert.go similarity index 97% rename from cmd/crowdsec-cli/metrics/statalert.go rename to cmd/crowdsec-cli/climetrics/statalert.go index c8055910a3a..65009fa322d 100644 --- a/cmd/crowdsec-cli/metrics/statalert.go +++ b/cmd/crowdsec-cli/climetrics/statalert.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statappsecengine.go b/cmd/crowdsec-cli/climetrics/statappsecengine.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statappsecengine.go rename to cmd/crowdsec-cli/climetrics/statappsecengine.go index 18ad03ef03f..e4e0048daaf 100644 --- a/cmd/crowdsec-cli/metrics/statappsecengine.go +++ b/cmd/crowdsec-cli/climetrics/statappsecengine.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statappsecrule.go b/cmd/crowdsec-cli/climetrics/statappsecrule.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statappsecrule.go rename to cmd/crowdsec-cli/climetrics/statappsecrule.go index 17ec28de99e..9647a111b77 100644 --- a/cmd/crowdsec-cli/metrics/statappsecrule.go +++ b/cmd/crowdsec-cli/climetrics/statappsecrule.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "fmt" diff --git a/cmd/crowdsec-cli/metrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go similarity index 99% 
rename from cmd/crowdsec-cli/metrics/statbouncer.go rename to cmd/crowdsec-cli/climetrics/statbouncer.go index ef4a97a7cc5..42aff81c63d 100644 --- a/cmd/crowdsec-cli/metrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "context" diff --git a/cmd/crowdsec-cli/metrics/statbucket.go b/cmd/crowdsec-cli/climetrics/statbucket.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statbucket.go rename to cmd/crowdsec-cli/climetrics/statbucket.go index 62ca4dee71d..836fa0ed1ab 100644 --- a/cmd/crowdsec-cli/metrics/statbucket.go +++ b/cmd/crowdsec-cli/climetrics/statbucket.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statdecision.go b/cmd/crowdsec-cli/climetrics/statdecision.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statdecision.go rename to cmd/crowdsec-cli/climetrics/statdecision.go index b1474d95f76..485644a55ba 100644 --- a/cmd/crowdsec-cli/metrics/statdecision.go +++ b/cmd/crowdsec-cli/climetrics/statdecision.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statlapi.go b/cmd/crowdsec-cli/climetrics/statlapi.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statlapi.go rename to cmd/crowdsec-cli/climetrics/statlapi.go index f8a737e5c44..7d8831aad74 100644 --- a/cmd/crowdsec-cli/metrics/statlapi.go +++ b/cmd/crowdsec-cli/climetrics/statlapi.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statlapibouncer.go b/cmd/crowdsec-cli/climetrics/statlapibouncer.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statlapibouncer.go rename to cmd/crowdsec-cli/climetrics/statlapibouncer.go index e7483c6294d..3ee35adfe9a 100644 --- a/cmd/crowdsec-cli/metrics/statlapibouncer.go +++ b/cmd/crowdsec-cli/climetrics/statlapibouncer.go @@ -1,4 +1,4 @@ -package metrics +package climetrics 
import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statlapidecision.go b/cmd/crowdsec-cli/climetrics/statlapidecision.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statlapidecision.go rename to cmd/crowdsec-cli/climetrics/statlapidecision.go index 97e17fe8a49..5f4d2c07764 100644 --- a/cmd/crowdsec-cli/metrics/statlapidecision.go +++ b/cmd/crowdsec-cli/climetrics/statlapidecision.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statlapimachine.go b/cmd/crowdsec-cli/climetrics/statlapimachine.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statlapimachine.go rename to cmd/crowdsec-cli/climetrics/statlapimachine.go index 6b9d9da207e..2f81ccb5751 100644 --- a/cmd/crowdsec-cli/metrics/statlapimachine.go +++ b/cmd/crowdsec-cli/climetrics/statlapimachine.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statparser.go b/cmd/crowdsec-cli/climetrics/statparser.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statparser.go rename to cmd/crowdsec-cli/climetrics/statparser.go index d8d651f269f..58ce2248648 100644 --- a/cmd/crowdsec-cli/metrics/statparser.go +++ b/cmd/crowdsec-cli/climetrics/statparser.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statstash.go b/cmd/crowdsec-cli/climetrics/statstash.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statstash.go rename to cmd/crowdsec-cli/climetrics/statstash.go index 79c14b04fd6..9de3469bea1 100644 --- a/cmd/crowdsec-cli/metrics/statstash.go +++ b/cmd/crowdsec-cli/climetrics/statstash.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statwhitelist.go b/cmd/crowdsec-cli/climetrics/statwhitelist.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statwhitelist.go rename to cmd/crowdsec-cli/climetrics/statwhitelist.go index 
89a016d22b0..6848452458b 100644 --- a/cmd/crowdsec-cli/metrics/statwhitelist.go +++ b/cmd/crowdsec-cli/climetrics/statwhitelist.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/store.go b/cmd/crowdsec-cli/climetrics/store.go similarity index 99% rename from cmd/crowdsec-cli/metrics/store.go rename to cmd/crowdsec-cli/climetrics/store.go index 4f3b8ff9ffa..5de50558e89 100644 --- a/cmd/crowdsec-cli/metrics/store.go +++ b/cmd/crowdsec-cli/climetrics/store.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "context" diff --git a/cmd/crowdsec-cli/metrics/table.go b/cmd/crowdsec-cli/climetrics/table.go similarity index 99% rename from cmd/crowdsec-cli/metrics/table.go rename to cmd/crowdsec-cli/climetrics/table.go index 66874e69321..c446f2267e9 100644 --- a/cmd/crowdsec-cli/metrics/table.go +++ b/cmd/crowdsec-cli/climetrics/table.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "errors" diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index bd9d8988132..d4046414030 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -14,7 +14,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/metrics" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/fflag" @@ -252,7 +252,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIVersion().NewCommand()) cmd.AddCommand(NewCLIConfig(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIHub(cli.cfg).NewCommand()) - cmd.AddCommand(metrics.NewCLI(cli.cfg).NewCommand()) + cmd.AddCommand(climetrics.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDecisions(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIAlerts(cli.cfg).NewCommand()) diff --git a/cmd/crowdsec-cli/support.go 
b/cmd/crowdsec-cli/support.go index 55c8516e605..324be2710fd 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -22,7 +22,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/metrics" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -89,7 +89,7 @@ func (cli *cliSupport) dumpMetrics(ctx context.Context, db *database.Client, zw humanMetrics := new(bytes.Buffer) - ms := metrics.NewMetricStore() + ms := climetrics.NewMetricStore() if err := ms.Fetch(ctx, cfg.Cscli.PrometheusUrl, db); err != nil { return err From 22369941cbb88b608a81e2f4b43c6cbcf5cb99eb Mon Sep 17 00:00:00 2001 From: marco Date: Thu, 11 Jul 2024 11:00:08 +0200 Subject: [PATCH 075/119] remove metric payload size limit --- pkg/database/ent/migrate/schema.go | 2 +- pkg/database/ent/schema/metric.go | 2 +- test/lib/db/instance-mysql | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index 5b436830192..c1ce25bddef 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -255,7 +255,7 @@ var ( {Name: "generated_by", Type: field.TypeString}, {Name: "collected_at", Type: field.TypeTime}, {Name: "pushed_at", Type: field.TypeTime, Nullable: true}, - {Name: "payload", Type: field.TypeString}, + {Name: "payload", Type: field.TypeString, Size: 2147483647}, } // MetricsTable holds the schema information for the "metrics" table. MetricsTable = &schema.Table{ diff --git a/pkg/database/ent/schema/metric.go b/pkg/database/ent/schema/metric.go index 9de3f21f464..b47da78bdf3 100644 --- a/pkg/database/ent/schema/metric.go +++ b/pkg/database/ent/schema/metric.go @@ -28,7 +28,7 @@ func (Metric) Fields() []ent.Field { Nillable(). Optional(). 
Comment("When the metrics are sent to the console"), - field.String("payload"). + field.Text("payload"). Immutable(). Comment("The actual metrics (item0)"), } diff --git a/test/lib/db/instance-mysql b/test/lib/db/instance-mysql index 6b40c84acba..df38f09761f 100755 --- a/test/lib/db/instance-mysql +++ b/test/lib/db/instance-mysql @@ -21,7 +21,7 @@ about() { check_requirements() { if ! command -v mysql >/dev/null; then - die "missing required program 'mysql' as a mysql client (package mariadb-client-core-10.6 on debian like system)" + die "missing required program 'mysql' as a mysql client (package mariadb-client on debian like system)" fi } From 3519bf4785d4b9c5789e4c531e8448440b00480e Mon Sep 17 00:00:00 2001 From: marco Date: Thu, 11 Jul 2024 11:34:14 +0200 Subject: [PATCH 076/119] update log/error messages; lint --- cmd/crowdsec-cli/climetrics/statbouncer.go | 10 ++-------- pkg/csconfig/database.go | 1 - pkg/database/machines.go | 9 +++++---- pkg/database/metrics.go | 5 ++--- 4 files changed, 9 insertions(+), 16 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 42aff81c63d..bec014be18a 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -98,7 +98,7 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { labels := item.Labels // these are mandatory but we got pointers, so... - + valid := true if item.Name == nil { @@ -154,15 +154,10 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { func (s *statBouncer) aggregate() { // [bouncer][origin][name][unit]value - - // XXX: how about blocked ips? 
- if s.aggregated == nil { s.aggregated = make(map[string]map[string]map[string]map[string]int64) } - // TODO: describe CAPI, total with all origins - for _, raw := range s.rawMetrics { if _, ok := s.aggregated[raw.bouncerName]; !ok { s.aggregated[raw.bouncerName] = make(map[string]map[string]map[string]int64) @@ -205,7 +200,7 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEm }) // XXX: total of all origins // XXX: blocked_ips and other metrics - + numRows := 0 // we print one table per bouncer only if it has stats, so "showEmpty" has no effect @@ -234,5 +229,4 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEm fmt.Fprintln(out, t.Render()) } } - } diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go index 31006577506..b3874014556 100644 --- a/pkg/csconfig/database.go +++ b/pkg/csconfig/database.go @@ -39,7 +39,6 @@ type DatabaseCfg struct { } type AuthGCCfg struct { - // XXX: define these as custom type (with days etc.) ? Cert *string `yaml:"cert,omitempty"` CertDuration *time.Duration Api *string `yaml:"api_key,omitempty"` diff --git a/pkg/database/machines.go b/pkg/database/machines.go index c6325b08260..35273d28d27 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -58,8 +58,9 @@ func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics models.B // TODO: update scenarios Save(c.CTX) if err != nil { - return fmt.Errorf("unable to update base machine metrics in database: %s", err) + return fmt.Errorf("unable to update base machine metrics in database: %w", err) } + return nil } @@ -203,7 +204,7 @@ func (c *Client) UpdateMachineScenarios(scenarios string, ID int) error { SetScenarios(scenarios). 
Save(c.CTX) if err != nil { - return fmt.Errorf("unable to update machine in database: %s", err) + return fmt.Errorf("unable to update machine in database: %w", err) } return nil @@ -214,7 +215,7 @@ func (c *Client) UpdateMachineIP(ipAddr string, ID int) error { SetIpAddress(ipAddr). Save(c.CTX) if err != nil { - return fmt.Errorf("unable to update machine IP in database: %s", err) + return fmt.Errorf("unable to update machine IP in database: %w", err) } return nil @@ -225,7 +226,7 @@ func (c *Client) UpdateMachineVersion(ipAddr string, ID int) error { SetVersion(ipAddr). Save(c.CTX) if err != nil { - return fmt.Errorf("unable to update machine version in database: %s", err) + return fmt.Errorf("unable to update machine version in database: %w", err) } return nil diff --git a/pkg/database/metrics.go b/pkg/database/metrics.go index b850ea0be07..1f7e4cf217b 100644 --- a/pkg/database/metrics.go +++ b/pkg/database/metrics.go @@ -13,7 +13,6 @@ import ( // CollectMetricsToPush (count limit? including stale?) // SetPushedMetrics // RemoveOldMetrics -// avoid errors.Wrapf func (c *Client) CreateMetric(generatedType metric.GeneratedType, generatedBy string, collectedAt time.Time, payload string) (*ent.Metric, error) { metric, err := c.Ent.Metric. @@ -97,8 +96,8 @@ func (c *Client) GetBouncerUsageMetricsByName(bouncerName string) ([]*ent.Metric Order(ent.Desc(metric.FieldCollectedAt)). 
All(c.CTX) if err != nil { - c.Log.Warningf("GetBouncerUsageMetricsByOrigin: %s", err) - return nil, fmt.Errorf("getting bouncer usage metrics by origin %s: %w", bouncerName, err) + c.Log.Warningf("GetBouncerUsageMetricsByName: %s", err) + return nil, fmt.Errorf("getting bouncer usage metrics by name %s: %w", bouncerName, err) } return metrics, nil From 0a74de59f3d0d698396ec2a99128f287878019f9 Mon Sep 17 00:00:00 2001 From: marco Date: Thu, 11 Jul 2024 13:18:02 +0200 Subject: [PATCH 077/119] footer with totals by origin; fix display of 0 values --- cmd/crowdsec-cli/climetrics/number.go | 2 +- cmd/crowdsec-cli/climetrics/statbouncer.go | 43 ++++++++++++++++++---- test/bats/08_metrics_bouncer.bats | 18 +++++---- 3 files changed, 48 insertions(+), 15 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/number.go b/cmd/crowdsec-cli/climetrics/number.go index e8144a7f0c9..709b7cf853a 100644 --- a/cmd/crowdsec-cli/climetrics/number.go +++ b/cmd/crowdsec-cli/climetrics/number.go @@ -26,7 +26,7 @@ func formatNumber(num int64, withUnit bool) string { return strconv.FormatInt(num, 10) } - goodUnit := unit{} + goodUnit := ranges[len(ranges)-1] for _, u := range ranges { if num >= u.value { diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index bec014be18a..2ca9256d841 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -35,6 +35,7 @@ type statBouncer struct { // over them multiple times and keep the aggregation code simple rawMetrics []bouncerMetricItem aggregated map[string]map[string]map[string]map[string]int64 + aggregatedAllOrigin map[string]map[string]map[string]int64 } func (s *statBouncer) MarshalJSON() ([]byte, error) { @@ -158,6 +159,10 @@ func (s *statBouncer) aggregate() { s.aggregated = make(map[string]map[string]map[string]map[string]int64) } + if s.aggregatedAllOrigin == nil { + s.aggregatedAllOrigin = make(map[string]map[string]map[string]int64) + } + 
for _, raw := range s.rawMetrics { if _, ok := s.aggregated[raw.bouncerName]; !ok { s.aggregated[raw.bouncerName] = make(map[string]map[string]map[string]int64) @@ -176,6 +181,20 @@ func (s *statBouncer) aggregate() { } s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] += int64(raw.value) + + if _, ok := s.aggregatedAllOrigin[raw.bouncerName]; !ok { + s.aggregatedAllOrigin[raw.bouncerName] = make(map[string]map[string]int64) + } + + if _, ok := s.aggregatedAllOrigin[raw.bouncerName][raw.name]; !ok { + s.aggregatedAllOrigin[raw.bouncerName][raw.name] = make(map[string]int64) + } + + if _, ok := s.aggregatedAllOrigin[raw.bouncerName][raw.name][raw.unit]; !ok { + s.aggregatedAllOrigin[raw.bouncerName][raw.name][raw.unit] = 0 + } + + s.aggregatedAllOrigin[raw.bouncerName][raw.name][raw.unit] += int64(raw.value) } } @@ -186,19 +205,18 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEm } // [bouncer][origin]; where origin=="" is the total - + for _, bouncerName := range maptools.SortedKeys(bouncerNames) { t := cstable.New(out, wantColor).Writer t.AppendHeader(table.Row{"Origin", "Bytes", "Bytes", "Packets", "Packets"}, table.RowConfig{AutoMerge: true}) t.AppendHeader(table.Row{"", "processed", "dropped", "processed", "dropped"}) t.SetColumnConfigs([]table.ColumnConfig{ - {Number:1, Align: text.AlignLeft}, - {Number:2, Align: text.AlignRight}, - {Number:3, Align: text.AlignRight}, - {Number:4, Align: text.AlignRight}, - {Number:5, Align: text.AlignRight}, + {Number:1, Align: text.AlignLeft, AlignFooter: text.AlignRight}, + {Number:2, Align: text.AlignRight, AlignFooter: text.AlignRight}, + {Number:3, Align: text.AlignRight, AlignFooter: text.AlignRight}, + {Number:4, Align: text.AlignRight, AlignFooter: text.AlignRight}, + {Number:5, Align: text.AlignRight, AlignFooter: text.AlignRight}, }) - // XXX: total of all origins // XXX: blocked_ips and other metrics numRows := 0 @@ -221,6 +239,17 @@ func (s *statBouncer) Table(out 
io.Writer, wantColor string, noUnit bool, showEm numRows += 1 } + totals := s.aggregatedAllOrigin[bouncerName] + + t.AppendFooter( + table.Row{"Total", + formatNumber(totals["processed"]["byte"], !noUnit), + formatNumber(totals["dropped"]["byte"], !noUnit), + formatNumber(totals["processed"]["packet"], !noUnit), + formatNumber(totals["dropped"]["packet"], !noUnit), + }, + ) + if numRows > 0 || showEmpty { title, _ := s.Description() // don't use SetTitle() because it draws the title inside table box diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 407db7e8396..58af64b6693 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -125,7 +125,9 @@ teardown() { | Origin | Bytes | Packets | | | processed | dropped | processed | dropped | +--------+-----------+---------+-----------+---------+ - | | NaN | NaN | NaN | NaN | + | | 0 | 0 | 0 | 0 | + +--------+-----------+---------+-----------+---------+ + | Total | 0 | 0 | 0 | 0 | +--------+-----------+---------+-----------+---------+ EOT @@ -207,14 +209,16 @@ teardown() { | Origin | Bytes | Packets | | | processed | dropped | processed | dropped | +----------------------------------+-----------+---------+-----------+---------+ - | | NaN | NaN | NaN | NaN | - | CAPI | NaN | 3.80k | NaN | 100 | - | cscli | NaN | 380 | NaN | 10 | - | lists:firehol_cruzit_web_attacks | NaN | 1.03k | NaN | 23 | - | lists:firehol_voipbl | NaN | 3.85k | NaN | 58 | + | | 0 | 0 | 0 | 0 | + | CAPI | 0 | 3.80k | 0 | 100 | + | cscli | 0 | 380 | 0 | 10 | + | lists:firehol_cruzit_web_attacks | 0 | 1.03k | 0 | 23 | + | lists:firehol_voipbl | 0 | 3.85k | 0 | 58 | + +----------------------------------+-----------+---------+-----------+---------+ + | Total | 0 | 9.06k | 0 | 191 | +----------------------------------+-----------+---------+-----------+---------+ EOT - # TODO: total of all origin, multiple item lists, active decisions discard older values + # TODO: ultiple item lists, 
active decisions discard older values } From 50c772a73444ba2aa751cc4c7411874974cbcd1c Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Thu, 11 Jul 2024 14:43:02 +0200 Subject: [PATCH 078/119] add maxLength for some elements --- pkg/models/base_metrics.go | 5 +++++ pkg/models/localapi_swagger.yaml | 7 +++++++ pkg/models/metrics_detail_item.go | 10 ++++++++++ pkg/models/metrics_labels.go | 15 +++++++++++++++ pkg/models/o_sversion.go | 10 ++++++++++ 5 files changed, 47 insertions(+) diff --git a/pkg/models/base_metrics.go b/pkg/models/base_metrics.go index 154d9004afe..94691ea233e 100644 --- a/pkg/models/base_metrics.go +++ b/pkg/models/base_metrics.go @@ -35,6 +35,7 @@ type BaseMetrics struct { // version of the remediation component // Required: true + // Max Length: 255 Version *string `json:"version"` } @@ -124,6 +125,10 @@ func (m *BaseMetrics) validateVersion(formats strfmt.Registry) error { return err } + if err := validate.MaxLength("version", "body", *m.Version, 255); err != nil { + return err + } + return nil } diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index d726f452a16..ba14880e7c5 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -1095,6 +1095,7 @@ definitions: version: type: string description: version of the remediation component + maxLength: 255 os: $ref: '#/definitions/OSversion' metrics: @@ -1107,6 +1108,7 @@ definitions: items: type: string description: feature flags (expected to be empty for remediation components) + maxLength: 255 utc_startup_timestamp: type: integer description: UTC timestamp of the startup of the software @@ -1120,9 +1122,11 @@ definitions: name: type: string description: name of the OS + maxLength: 255 version: type: string description: version of the OS + maxLength: 255 required: - name - version @@ -1146,12 +1150,14 @@ definitions: name: type: string description: name of the metric + maxLength: 255 value: type: number description: value of the 
metric unit: type: string description: unit of the metric + maxLength: 255 labels: $ref: '#/definitions/MetricsLabels' description: labels of the metric @@ -1178,6 +1184,7 @@ definitions: additionalProperties: type: string description: label of the metric + maxLength: 255 ConsoleOptions: title: ConsoleOptions type: array diff --git a/pkg/models/metrics_detail_item.go b/pkg/models/metrics_detail_item.go index 889f7e263d2..bb237884fcf 100644 --- a/pkg/models/metrics_detail_item.go +++ b/pkg/models/metrics_detail_item.go @@ -24,10 +24,12 @@ type MetricsDetailItem struct { // name of the metric // Required: true + // Max Length: 255 Name *string `json:"name"` // unit of the metric // Required: true + // Max Length: 255 Unit *string `json:"unit"` // value of the metric @@ -86,6 +88,10 @@ func (m *MetricsDetailItem) validateName(formats strfmt.Registry) error { return err } + if err := validate.MaxLength("name", "body", *m.Name, 255); err != nil { + return err + } + return nil } @@ -95,6 +101,10 @@ func (m *MetricsDetailItem) validateUnit(formats strfmt.Registry) error { return err } + if err := validate.MaxLength("unit", "body", *m.Unit, 255); err != nil { + return err + } + return nil } diff --git a/pkg/models/metrics_labels.go b/pkg/models/metrics_labels.go index d807a88bc8d..176a15cce24 100644 --- a/pkg/models/metrics_labels.go +++ b/pkg/models/metrics_labels.go @@ -8,7 +8,9 @@ package models import ( "context" + "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" ) // MetricsLabels MetricsLabels @@ -18,6 +20,19 @@ type MetricsLabels map[string]string // Validate validates this metrics labels func (m MetricsLabels) Validate(formats strfmt.Registry) error { + var res []error + + for k := range m { + + if err := validate.MaxLength(k, "body", m[k], 255); err != nil { + return err + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } return nil } diff --git a/pkg/models/o_sversion.go b/pkg/models/o_sversion.go index eb670409c90..8f1f43ea9cc 100644 --- a/pkg/models/o_sversion.go +++ b/pkg/models/o_sversion.go @@ -21,10 +21,12 @@ type OSversion struct { // name of the OS // Required: true + // Max Length: 255 Name *string `json:"name"` // version of the OS // Required: true + // Max Length: 255 Version *string `json:"version"` } @@ -52,6 +54,10 @@ func (m *OSversion) validateName(formats strfmt.Registry) error { return err } + if err := validate.MaxLength("name", "body", *m.Name, 255); err != nil { + return err + } + return nil } @@ -61,6 +67,10 @@ func (m *OSversion) validateVersion(formats strfmt.Registry) error { return err } + if err := validate.MaxLength("version", "body", *m.Version, 255); err != nil { + return err + } + return nil } From 69f7a4e18207a76ba30e7d9060fd7cb4e461875b Mon Sep 17 00:00:00 2001 From: marco Date: Thu, 11 Jul 2024 15:03:10 +0200 Subject: [PATCH 079/119] revert "db: round created, updated... 
timestamps to 1 second" to fix decision stream startup=true --- pkg/types/utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/types/utils.go b/pkg/types/utils.go index 384dd00367e..712d44ba12d 100644 --- a/pkg/types/utils.go +++ b/pkg/types/utils.go @@ -66,7 +66,7 @@ func ConfigureLogger(clog *log.Logger) error { } func UtcNow() time.Time { - return time.Now().UTC().Round(time.Second) + return time.Now().UTC() } func IsNetworkFS(path string) (bool, string, error) { From 8c73c1ae6af215d1e2f6c3f08c469d7b55573536 Mon Sep 17 00:00:00 2001 From: marco Date: Thu, 11 Jul 2024 17:06:38 +0200 Subject: [PATCH 080/119] table formatting --- cmd/crowdsec-cli/climetrics/statbouncer.go | 22 ++++++------ test/bats/08_metrics_bouncer.bats | 40 +++++++++++----------- test/bats/99_lapi-stream-mode.bats | 2 -- 3 files changed, 31 insertions(+), 33 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 2ca9256d841..f44f316098f 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -208,14 +208,14 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEm for _, bouncerName := range maptools.SortedKeys(bouncerNames) { t := cstable.New(out, wantColor).Writer - t.AppendHeader(table.Row{"Origin", "Bytes", "Bytes", "Packets", "Packets"}, table.RowConfig{AutoMerge: true}) - t.AppendHeader(table.Row{"", "processed", "dropped", "processed", "dropped"}) + t.AppendHeader(table.Row{"Origin", "dropped", "dropped", "processed", "processed"}, table.RowConfig{AutoMerge: true}) + t.AppendHeader(table.Row{"", "bytes", "packets", "bytes", "packets"}) t.SetColumnConfigs([]table.ColumnConfig{ - {Number:1, Align: text.AlignLeft, AlignFooter: text.AlignRight}, - {Number:2, Align: text.AlignRight, AlignFooter: text.AlignRight}, - {Number:3, Align: text.AlignRight, AlignFooter: text.AlignRight}, - {Number:4, Align: text.AlignRight, 
AlignFooter: text.AlignRight}, - {Number:5, Align: text.AlignRight, AlignFooter: text.AlignRight}, + {Number:1, AlignHeader: text.AlignLeft, Align: text.AlignLeft, AlignFooter: text.AlignRight,}, + {Number:2, AlignHeader: text.AlignCenter, Align: text.AlignRight, AlignFooter: text.AlignRight}, + {Number:3, AlignHeader: text.AlignCenter, Align: text.AlignRight, AlignFooter: text.AlignRight}, + {Number:4, AlignHeader: text.AlignCenter, Align: text.AlignRight, AlignFooter: text.AlignRight}, + {Number:5, AlignHeader: text.AlignCenter, Align: text.AlignRight, AlignFooter: text.AlignRight}, }) // XXX: blocked_ips and other metrics @@ -229,10 +229,10 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEm metrics := s.aggregated[bouncerName][origin] t.AppendRow( table.Row{origin, - formatNumber(metrics["processed"]["byte"], !noUnit), formatNumber(metrics["dropped"]["byte"], !noUnit), - formatNumber(metrics["processed"]["packet"], !noUnit), formatNumber(metrics["dropped"]["packet"], !noUnit), + formatNumber(metrics["processed"]["byte"], !noUnit), + formatNumber(metrics["processed"]["packet"], !noUnit), }, ) @@ -243,10 +243,10 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEm t.AppendFooter( table.Row{"Total", - formatNumber(totals["processed"]["byte"], !noUnit), formatNumber(totals["dropped"]["byte"], !noUnit), - formatNumber(totals["processed"]["packet"], !noUnit), formatNumber(totals["dropped"]["packet"], !noUnit), + formatNumber(totals["processed"]["byte"], !noUnit), + formatNumber(totals["processed"]["packet"], !noUnit), }, ) diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 58af64b6693..356292d5b72 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -121,14 +121,14 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT Bouncer Metrics (testbouncer): - +--------+---------------------+---------------------+ - | 
Origin | Bytes | Packets | - | | processed | dropped | processed | dropped | - +--------+-----------+---------+-----------+---------+ - | | 0 | 0 | 0 | 0 | - +--------+-----------+---------+-----------+---------+ - | Total | 0 | 0 | 0 | 0 | - +--------+-----------+---------+-----------+---------+ + +--------+-------------------+-----------------------+ + | Origin | dropped | processed | + | | bytes | packets | bytes | packets | + +--------+---------+---------+-----------+-----------+ + | | 0 | 0 | 0 | 0 | + +--------+---------+---------+-----------+-----------+ + | Total | 0 | 0 | 0 | 0 | + +--------+---------+---------+-----------+-----------+ EOT # some more realistic values, at least for the labels @@ -205,18 +205,18 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT Bouncer Metrics (testbouncer): - +----------------------------------+---------------------+---------------------+ - | Origin | Bytes | Packets | - | | processed | dropped | processed | dropped | - +----------------------------------+-----------+---------+-----------+---------+ - | | 0 | 0 | 0 | 0 | - | CAPI | 0 | 3.80k | 0 | 100 | - | cscli | 0 | 380 | 0 | 10 | - | lists:firehol_cruzit_web_attacks | 0 | 1.03k | 0 | 23 | - | lists:firehol_voipbl | 0 | 3.85k | 0 | 58 | - +----------------------------------+-----------+---------+-----------+---------+ - | Total | 0 | 9.06k | 0 | 191 | - +----------------------------------+-----------+---------+-----------+---------+ + +----------------------------------+-------------------+-----------------------+ + | Origin | dropped | processed | + | | bytes | packets | bytes | packets | + +----------------------------------+---------+---------+-----------+-----------+ + | | 0 | 0 | 0 | 0 | + | CAPI | 3.80k | 100 | 0 | 0 | + | cscli | 380 | 10 | 0 | 0 | + | lists:firehol_cruzit_web_attacks | 1.03k | 23 | 0 | 0 | + | lists:firehol_voipbl | 3.85k | 58 | 0 | 0 | + +----------------------------------+---------+---------+-----------+-----------+ + 
| Total | 9.06k | 191 | 0 | 0 | + +----------------------------------+---------+---------+-----------+-----------+ EOT # TODO: ultiple item lists, active decisions discard older values diff --git a/test/bats/99_lapi-stream-mode.bats b/test/bats/99_lapi-stream-mode.bats index b3ee8a434ff..f6a5907e6e1 100644 --- a/test/bats/99_lapi-stream-mode.bats +++ b/test/bats/99_lapi-stream-mode.bats @@ -32,7 +32,6 @@ setup() { @test "stream start" { rune -0 curl-with-key "/v1/decisions/stream?startup=true" - if is_db_mysql; then sleep 3; fi rune -0 jq -r '.new' <(output) assert_output --partial '1111:2222:3333:4444:5555:6666:7777:8888' assert_output --partial '1.2.3.4' @@ -41,7 +40,6 @@ setup() { @test "stream cont (add)" { rune -0 cscli decisions add -i '1.2.3.5' - if is_db_mysql; then sleep 3; fi rune -0 curl-with-key "/v1/decisions/stream" rune -0 jq -r '.new' <(output) assert_output --partial '1.2.3.5' From daf7a13c768025b8c4b07d71820666611a39282d Mon Sep 17 00:00:00 2001 From: marco Date: Fri, 12 Jul 2024 10:30:39 +0200 Subject: [PATCH 081/119] bouncer stats: dynamic columns --- cmd/crowdsec-cli/climetrics/statbouncer.go | 84 ++++++++++++++-------- test/bats/08_metrics_bouncer.bats | 43 ++++++----- 2 files changed, 77 insertions(+), 50 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index f44f316098f..36ec2160941 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -204,51 +204,79 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEm bouncerNames[item.bouncerName] = true } - // [bouncer][origin]; where origin=="" is the total + columns := make(map[string]map[string]bool) + for _, item := range s.rawMetrics { + // build a map of the metric names and units, to display dynamic columns + if _, ok := columns[item.name]; !ok { + columns[item.name] = make(map[string]bool) + } + columns[item.name][item.unit] = true + } for _, 
bouncerName := range maptools.SortedKeys(bouncerNames) { t := cstable.New(out, wantColor).Writer - t.AppendHeader(table.Row{"Origin", "dropped", "dropped", "processed", "processed"}, table.RowConfig{AutoMerge: true}) - t.AppendHeader(table.Row{"", "bytes", "packets", "bytes", "packets"}) - t.SetColumnConfigs([]table.ColumnConfig{ - {Number:1, AlignHeader: text.AlignLeft, Align: text.AlignLeft, AlignFooter: text.AlignRight,}, - {Number:2, AlignHeader: text.AlignCenter, Align: text.AlignRight, AlignFooter: text.AlignRight}, - {Number:3, AlignHeader: text.AlignCenter, Align: text.AlignRight, AlignFooter: text.AlignRight}, - {Number:4, AlignHeader: text.AlignCenter, Align: text.AlignRight, AlignFooter: text.AlignRight}, - {Number:5, AlignHeader: text.AlignCenter, Align: text.AlignRight, AlignFooter: text.AlignRight}, - }) - // XXX: blocked_ips and other metrics + header1 := table.Row{"Origin"} + header2 := table.Row{""} + colNum := 1 + + colCfg := []table.ColumnConfig{{ + Number:colNum, + AlignHeader: + text.AlignLeft, + Align: text.AlignLeft, + AlignFooter: text.AlignRight, + }} + + for _, name := range maptools.SortedKeys(columns) { + for _, unit := range maptools.SortedKeys(columns[name]) { + println(name, unit) + colNum += 1 + header1 = append(header1, name) + header2 = append(header2, unit) + colCfg = append(colCfg, table.ColumnConfig{ + Number: colNum, + AlignHeader: text.AlignCenter, + Align: text.AlignRight, + AlignFooter: text.AlignRight}, + ) + } + } + + t.AppendHeader(header1, table.RowConfig{AutoMerge: true}) + t.AppendHeader(header2) + + t.SetColumnConfigs(colCfg) numRows := 0 // we print one table per bouncer only if it has stats, so "showEmpty" has no effect // unless we want a global table for all bouncers - // sort origins for stable output + // sort all the ranges for stable output + for _, origin := range maptools.SortedKeys(s.aggregated[bouncerName]) { metrics := s.aggregated[bouncerName][origin] - t.AppendRow( - table.Row{origin, - 
formatNumber(metrics["dropped"]["byte"], !noUnit), - formatNumber(metrics["dropped"]["packet"], !noUnit), - formatNumber(metrics["processed"]["byte"], !noUnit), - formatNumber(metrics["processed"]["packet"], !noUnit), - }, - ) + row := table.Row{origin} + for _, name := range maptools.SortedKeys(columns) { + for _, unit := range maptools.SortedKeys(columns[name]) { + row = append(row, formatNumber(metrics[name][unit], !noUnit)) + } + } + t.AppendRow(row) numRows += 1 } totals := s.aggregatedAllOrigin[bouncerName] - t.AppendFooter( - table.Row{"Total", - formatNumber(totals["dropped"]["byte"], !noUnit), - formatNumber(totals["dropped"]["packet"], !noUnit), - formatNumber(totals["processed"]["byte"], !noUnit), - formatNumber(totals["processed"]["packet"], !noUnit), - }, - ) + footer := table.Row{"Total"} + for _, name := range maptools.SortedKeys(columns) { + for _, unit := range maptools.SortedKeys(columns[name]) { + footer = append(footer, formatNumber(totals[name][unit], !noUnit)) + } + } + + t.AppendFooter(footer) if numRows > 0 || showEmpty { title, _ := s.Description() diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 356292d5b72..5e524f30c37 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -117,18 +117,17 @@ teardown() { # but without labels the origin string is empty assert_json '{bouncers:{testbouncer:{"": {"foo": {"dogyear": 2, "pound": 5}}}}}' - # TODO: adjust the table output rune -0 cscli metrics show bouncers assert_output - <<-EOT Bouncer Metrics (testbouncer): - +--------+-------------------+-----------------------+ - | Origin | dropped | processed | - | | bytes | packets | bytes | packets | - +--------+---------+---------+-----------+-----------+ - | | 0 | 0 | 0 | 0 | - +--------+---------+---------+-----------+-----------+ - | Total | 0 | 0 | 0 | 0 | - +--------+---------+---------+-----------+-----------+ + +--------+-----------------+ + | Origin | foo | + | | dogyear | 
pound | + +--------+---------+-------+ + | | 2 | 5 | + +--------+---------+-------+ + | Total | 2 | 5 | + +--------+---------+-------+ EOT # some more realistic values, at least for the labels @@ -205,20 +204,20 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT Bouncer Metrics (testbouncer): - +----------------------------------+-------------------+-----------------------+ - | Origin | dropped | processed | - | | bytes | packets | bytes | packets | - +----------------------------------+---------+---------+-----------+-----------+ - | | 0 | 0 | 0 | 0 | - | CAPI | 3.80k | 100 | 0 | 0 | - | cscli | 380 | 10 | 0 | 0 | - | lists:firehol_cruzit_web_attacks | 1.03k | 23 | 0 | 0 | - | lists:firehol_voipbl | 3.85k | 58 | 0 | 0 | - +----------------------------------+---------+---------+-----------+-----------+ - | Total | 9.06k | 191 | 0 | 0 | - +----------------------------------+---------+---------+-----------+-----------+ + +----------------------------------+------------------+-------------------+-----------------+ + | Origin | active_decisions | dropped | foo | + | | ip | byte | packet | dogyear | pound | + +----------------------------------+------------------+---------+---------+---------+-------+ + | | 0 | 0 | 0 | 2 | 5 | + | CAPI | 0 | 3.80k | 100 | 0 | 0 | + | cscli | 1 | 380 | 10 | 0 | 0 | + | lists:firehol_cruzit_web_attacks | 0 | 1.03k | 23 | 0 | 0 | + | lists:firehol_voipbl | 51.94k | 3.85k | 58 | 0 | 0 | + +----------------------------------+------------------+---------+---------+---------+-------+ + | Total | 51.94k | 9.06k | 191 | 2 | 5 | + +----------------------------------+------------------+---------+---------+---------+-------+ EOT - # TODO: ultiple item lists, active decisions discard older values + # TODO: multiple item lists, multiple bouncers } From 9af00559de0efa3116f7df1148d99099eaa4cb58 Mon Sep 17 00:00:00 2001 From: marco Date: Fri, 12 Jul 2024 10:32:12 +0200 Subject: [PATCH 082/119] removed temporary hack --- 
cmd/crowdsec-cli/climetrics/statbouncer.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 36ec2160941..58d1545dadf 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "strings" "time" "github.com/jedib0t/go-pretty/v6/table" @@ -126,14 +125,6 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { unit := *item.Unit value := *item.Value - if unit == "byte" { - name = strings.TrimSuffix(name, "_bytes") - } - - if unit == "packet" { - name = strings.TrimSuffix(name, "_packets") - } - rawMetric := bouncerMetricItem{ bouncerName: bouncerName, ipType: labels["ip_type"], From 987f1557dec151f6bfb74d01c480e60c43f57c21 Mon Sep 17 00:00:00 2001 From: marco Date: Fri, 12 Jul 2024 11:17:24 +0200 Subject: [PATCH 083/119] fix multiple bouncer tables --- cmd/crowdsec-cli/climetrics/statbouncer.go | 21 +++-- test/bats/08_metrics_bouncer.bats | 98 +++++++++++++++++++++- 2 files changed, 107 insertions(+), 12 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 58d1545dadf..130f4745d28 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -195,16 +195,20 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEm bouncerNames[item.bouncerName] = true } - columns := make(map[string]map[string]bool) - for _, item := range s.rawMetrics { - // build a map of the metric names and units, to display dynamic columns - if _, ok := columns[item.name]; !ok { - columns[item.name] = make(map[string]bool) - } - columns[item.name][item.unit] = true - } for _, bouncerName := range maptools.SortedKeys(bouncerNames) { + columns := make(map[string]map[string]bool) + for _, item := range s.rawMetrics { + if item.bouncerName != 
bouncerName { + continue + } + // build a map of the metric names and units, to display dynamic columns + if _, ok := columns[item.name]; !ok { + columns[item.name] = make(map[string]bool) + } + columns[item.name][item.unit] = true + } + t := cstable.New(out, wantColor).Writer header1 := table.Row{"Origin"} header2 := table.Row{""} @@ -220,7 +224,6 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEm for _, name := range maptools.SortedKeys(columns) { for _, unit := range maptools.SortedKeys(columns[name]) { - println(name, unit) colNum += 1 header1 = append(header1, name) header2 = append(header2, unit) diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 5e524f30c37..467cac10603 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -51,7 +51,7 @@ teardown() { payload=$(yq -o j <<-EOT remediation_components: - - version: "v1.0" + - version: "v1.0" log_processors: [] EOT ) @@ -82,8 +82,8 @@ teardown() { payload=$(yq -o j <<-EOT remediation_components: - - version: "v1.0" - utc_startup_timestamp: 1707399316 + - version: "v1.0" + utc_startup_timestamp: 1707399316 log_processors: [] EOT ) @@ -221,3 +221,95 @@ teardown() { # TODO: multiple item lists, multiple bouncers } + +@test "rc usage metrics (multiple bouncers)" { + # multiple bouncers have separate totals and can have different types of metrics and units -> different columns + + API_KEY=$(cscli bouncers add bouncer1 -o raw) + export API_KEY + + payload=$(yq -o j <<-EOT + remediation_components: + - version: "v1.0" + utc_startup_timestamp: 1707399316 + metrics: + - meta: + utc_now_timestamp: 1707399316 + window_size_seconds: 600 + items: + - name: dropped + unit: byte + value: 1000 + labels: + origin: capi + - name: processed + unit: packet + value: 100 + labels: + origin: capi + - name: processed + unit: packet + value: 100 + labels: + origin: lists:somelist + EOT + ) + + rune -0 curl-with-key 
'/v1/usage-metrics' -X POST --data "$payload" + + + API_KEY=$(cscli bouncers add bouncer2 -o raw) + export API_KEY + + payload=$(yq -o j <<-EOT + remediation_components: + - version: "v1.0" + utc_startup_timestamp: 1707399316 + metrics: + - meta: + utc_now_timestamp: 1707399316 + window_size_seconds: 600 + items: + - name: dropped + unit: byte + value: 1500 + - name: dropped + unit: byte + value: 2000 + labels: + origin: capi + - name: dropped + unit: packet + value: 20 + EOT + ) + + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + + rune -0 cscli metrics show bouncers -o json + assert_json '{bouncers:{bouncer1:{capi:{dropped:{byte:1000},processed:{packet:100}},"lists:somelist":{processed:{packet:100}}},bouncer2:{"":{dropped:{byte:1500,packet:20}},capi:{dropped:{byte:2000}}}}}' + + rune -0 cscli metrics show bouncers + assert_output - <<-EOT + Bouncer Metrics (bouncer1): + +----------------+---------+-----------+ + | Origin | dropped | processed | + | | byte | packet | + +----------------+---------+-----------+ + | capi | 1.00k | 100 | + | lists:somelist | 0 | 100 | + +----------------+---------+-----------+ + | Total | 1.00k | 200 | + +----------------+---------+-----------+ + Bouncer Metrics (bouncer2): + +--------+-------------------+ + | Origin | dropped | + | | byte | packet | + +--------+---------+---------+ + | | 1.50k | 20 | + | capi | 2.00k | 0 | + +--------+---------+---------+ + | Total | 3.50k | 20 | + +--------+---------+---------+ + EOT +} From 2f97c3bd13be6b7e8b0c4387e2238d64ef5a224b Mon Sep 17 00:00:00 2001 From: marco Date: Fri, 12 Jul 2024 11:59:08 +0200 Subject: [PATCH 084/119] extract method bouncerTable(), separate tables with newline --- cmd/crowdsec-cli/climetrics/statbouncer.go | 157 ++++++++++++--------- 1 file changed, 88 insertions(+), 69 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 130f4745d28..ba9df948356 100644 --- 
a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -189,95 +189,114 @@ func (s *statBouncer) aggregate() { } } -func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - bouncerNames := make(map[string]bool) +// bouncerTable displays a table of metrics for a single bouncer +func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor string, noUnit bool) { + columns := make(map[string]map[string]bool) for _, item := range s.rawMetrics { - bouncerNames[item.bouncerName] = true + if item.bouncerName != bouncerName { + continue + } + // build a map of the metric names and units, to display dynamic columns + if _, ok := columns[item.name]; !ok { + columns[item.name] = make(map[string]bool) + } + columns[item.name][item.unit] = true } + // no metrics for this bouncer, skip. how did we get here ? + // anyway we can't honor the "showEmpty" flag in this case, + // we don't heven have the table headers - for _, bouncerName := range maptools.SortedKeys(bouncerNames) { - columns := make(map[string]map[string]bool) - for _, item := range s.rawMetrics { - if item.bouncerName != bouncerName { - continue - } - // build a map of the metric names and units, to display dynamic columns - if _, ok := columns[item.name]; !ok { - columns[item.name] = make(map[string]bool) - } - columns[item.name][item.unit] = true + if len(columns) == 0 { + return + } + + t := cstable.New(out, wantColor).Writer + header1 := table.Row{"Origin"} + header2 := table.Row{""} + colNum := 1 + + colCfg := []table.ColumnConfig{{ + Number:colNum, + AlignHeader: + text.AlignLeft, + Align: text.AlignLeft, + AlignFooter: text.AlignRight, + }} + + for _, name := range maptools.SortedKeys(columns) { + for _, unit := range maptools.SortedKeys(columns[name]) { + colNum += 1 + header1 = append(header1, name) + header2 = append(header2, unit) + colCfg = append(colCfg, table.ColumnConfig{ + Number: colNum, + AlignHeader: 
text.AlignCenter, + Align: text.AlignRight, + AlignFooter: text.AlignRight}, + ) } + } + + t.AppendHeader(header1, table.RowConfig{AutoMerge: true}) + t.AppendHeader(header2) + + t.SetColumnConfigs(colCfg) - t := cstable.New(out, wantColor).Writer - header1 := table.Row{"Origin"} - header2 := table.Row{""} - colNum := 1 + numRows := 0 - colCfg := []table.ColumnConfig{{ - Number:colNum, - AlignHeader: - text.AlignLeft, - Align: text.AlignLeft, - AlignFooter: text.AlignRight, - }} + // sort all the ranges for stable output + for _, origin := range maptools.SortedKeys(s.aggregated[bouncerName]) { + metrics := s.aggregated[bouncerName][origin] + row := table.Row{origin} for _, name := range maptools.SortedKeys(columns) { for _, unit := range maptools.SortedKeys(columns[name]) { - colNum += 1 - header1 = append(header1, name) - header2 = append(header2, unit) - colCfg = append(colCfg, table.ColumnConfig{ - Number: colNum, - AlignHeader: text.AlignCenter, - Align: text.AlignRight, - AlignFooter: text.AlignRight}, - ) + row = append(row, formatNumber(metrics[name][unit], !noUnit)) } } + t.AppendRow(row) - t.AppendHeader(header1, table.RowConfig{AutoMerge: true}) - t.AppendHeader(header2) - - t.SetColumnConfigs(colCfg) - - numRows := 0 - - // we print one table per bouncer only if it has stats, so "showEmpty" has no effect - // unless we want a global table for all bouncers - - // sort all the ranges for stable output + numRows += 1 + } - for _, origin := range maptools.SortedKeys(s.aggregated[bouncerName]) { - metrics := s.aggregated[bouncerName][origin] - row := table.Row{origin} - for _, name := range maptools.SortedKeys(columns) { - for _, unit := range maptools.SortedKeys(columns[name]) { - row = append(row, formatNumber(metrics[name][unit], !noUnit)) - } - } - t.AppendRow(row) + totals := s.aggregatedAllOrigin[bouncerName] - numRows += 1 + footer := table.Row{"Total"} + for _, name := range maptools.SortedKeys(columns) { + for _, unit := range 
maptools.SortedKeys(columns[name]) { + footer = append(footer, formatNumber(totals[name][unit], !noUnit)) } + } - totals := s.aggregatedAllOrigin[bouncerName] + t.AppendFooter(footer) - footer := table.Row{"Total"} - for _, name := range maptools.SortedKeys(columns) { - for _, unit := range maptools.SortedKeys(columns[name]) { - footer = append(footer, formatNumber(totals[name][unit], !noUnit)) - } - } + if numRows == 0 { + // this happens only if we decide to discard some data in the loop + return + } + + title, _ := s.Description() + // don't use SetTitle() because it draws the title inside table box + // TODO: newline position wrt other stat tables + cstable.RenderTitle(out, fmt.Sprintf("%s (%s):", title, bouncerName)) + fmt.Fprintln(out, t.Render()) +} - t.AppendFooter(footer) +// Table displays a table of metrics for each bouncer +func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, _ bool) { + bouncerNames := make(map[string]bool) + for _, item := range s.rawMetrics { + bouncerNames[item.bouncerName] = true + } - if numRows > 0 || showEmpty { - title, _ := s.Description() - // don't use SetTitle() because it draws the title inside table box - // TODO: newline position wrt other stat tables - cstable.RenderTitle(out, fmt.Sprintf("%s (%s):", title, bouncerName)) - fmt.Fprintln(out, t.Render()) + nl := false + for _, bouncerName := range maptools.SortedKeys(bouncerNames) { + if nl { + // empty line between tables + fmt.Fprintln(out) } + s.bouncerTable(out, bouncerName, wantColor, noUnit) + nl = true } } From adc86c29baee89f98490f86c32ab0eb0a9285851 Mon Sep 17 00:00:00 2001 From: marco Date: Fri, 12 Jul 2024 12:50:52 +0200 Subject: [PATCH 085/119] continue loop if metric row can't be parsed --- cmd/crowdsec-cli/climetrics/statbouncer.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index ba9df948356..097186391d4 100644 --- 
a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -91,6 +91,7 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { err := json.Unmarshal([]byte(met.Payload), &payload) if err != nil { log.Warningf("while parsing metrics: %s", err) + continue } for _, m := range payload.Metrics { From 2d7b8e7e55c787961f91c0a2b6bc0e246266d6f4 Mon Sep 17 00:00:00 2001 From: marco Date: Fri, 12 Jul 2024 12:51:19 +0200 Subject: [PATCH 086/119] show earliest collection date for each bouncer --- cmd/crowdsec-cli/climetrics/statbouncer.go | 22 +++++++++++++++------- test/bats/08_metrics_bouncer.bats | 15 ++++++++------- 2 files changed, 23 insertions(+), 14 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 097186391d4..a6e97ea7224 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -30,6 +30,8 @@ type bouncerMetricItem struct { } type statBouncer struct { + // oldest collection timestamp for each bouncer + oldestTS map[string]*time.Time // we keep de-normalized metrics so we can iterate // over them multiple times and keep the aggregation code simple rawMetrics []bouncerMetricItem @@ -68,20 +70,19 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { return fmt.Errorf("unable to fetch metrics: %w", err) } - // keep track of oldest collection timestamp - var since *time.Time + s.oldestTS = make(map[string]*time.Time) // don't spam the user with the same warnings warningsLogged := make(map[string]bool) for _, met := range metrics { + bouncerName := met.GeneratedBy + collectedAt := met.CollectedAt - if since == nil || collectedAt.Before(*since) { - since = &collectedAt + if s.oldestTS[bouncerName] == nil || collectedAt.Before(*s.oldestTS[bouncerName]) { + s.oldestTS[bouncerName] = &collectedAt } - bouncerName := met.GeneratedBy - type bouncerMetrics struct { Metrics 
[]models.DetailedMetrics `json:"metrics"` } @@ -278,9 +279,16 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor } title, _ := s.Description() + title = fmt.Sprintf("%s (%s)", title, bouncerName) + if s.oldestTS != nil { + // if we change this to .Local() beware of tests + title = fmt.Sprintf("%s since %s", title, s.oldestTS[bouncerName].String()) + } + title += ":" + // don't use SetTitle() because it draws the title inside table box // TODO: newline position wrt other stat tables - cstable.RenderTitle(out, fmt.Sprintf("%s (%s):", title, bouncerName)) + cstable.RenderTitle(out, title) fmt.Fprintln(out, t.Render()) } diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 467cac10603..8bd0ef106c8 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -119,7 +119,7 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT - Bouncer Metrics (testbouncer): + Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC: +--------+-----------------+ | Origin | foo | | | dogyear | pound | @@ -203,7 +203,7 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT - Bouncer Metrics (testbouncer): + Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC: +----------------------------------+------------------+-------------------+-----------------+ | Origin | active_decisions | dropped | foo | | | ip | byte | packet | dogyear | pound | @@ -231,7 +231,7 @@ teardown() { payload=$(yq -o j <<-EOT remediation_components: - version: "v1.0" - utc_startup_timestamp: 1707399316 + utc_startup_timestamp: 1707369316 metrics: - meta: utc_now_timestamp: 1707399316 @@ -264,10 +264,10 @@ teardown() { payload=$(yq -o j <<-EOT remediation_components: - version: "v1.0" - utc_startup_timestamp: 1707399316 + utc_startup_timestamp: 1707379316 metrics: - meta: - utc_now_timestamp: 1707399316 + utc_now_timestamp: 1707389316 window_size_seconds: 600 
items: - name: dropped @@ -291,7 +291,7 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT - Bouncer Metrics (bouncer1): + Bouncer Metrics (bouncer1) since 2024-02-08 13:35:16 +0000 UTC: +----------------+---------+-----------+ | Origin | dropped | processed | | | byte | packet | @@ -301,7 +301,8 @@ teardown() { +----------------+---------+-----------+ | Total | 1.00k | 200 | +----------------+---------+-----------+ - Bouncer Metrics (bouncer2): + + Bouncer Metrics (bouncer2) since 2024-02-08 10:48:36 +0000 UTC: +--------+-------------------+ | Origin | dropped | | | byte | packet | From 11bba5ef47eaf29b5f73a07dfe5f2fa5193d799d Mon Sep 17 00:00:00 2001 From: marco Date: Fri, 12 Jul 2024 13:55:44 +0200 Subject: [PATCH 087/119] func tests: add back sleep for mysql --- cmd/crowdsec-cli/climetrics/statbouncer.go | 2 +- test/bats/99_lapi-stream-mode.bats | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index a6e97ea7224..52afb89784e 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -91,7 +91,7 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { err := json.Unmarshal([]byte(met.Payload), &payload) if err != nil { - log.Warningf("while parsing metrics: %s", err) + log.Warningf("while parsing metrics for %s: %s", bouncerName, err) continue } diff --git a/test/bats/99_lapi-stream-mode.bats b/test/bats/99_lapi-stream-mode.bats index f6a5907e6e1..b3ee8a434ff 100644 --- a/test/bats/99_lapi-stream-mode.bats +++ b/test/bats/99_lapi-stream-mode.bats @@ -32,6 +32,7 @@ setup() { @test "stream start" { rune -0 curl-with-key "/v1/decisions/stream?startup=true" + if is_db_mysql; then sleep 3; fi rune -0 jq -r '.new' <(output) assert_output --partial '1111:2222:3333:4444:5555:6666:7777:8888' assert_output --partial '1.2.3.4' @@ -40,6 +41,7 @@ setup() { @test "stream cont 
(add)" { rune -0 cscli decisions add -i '1.2.3.5' + if is_db_mysql; then sleep 3; fi rune -0 curl-with-key "/v1/decisions/stream" rune -0 jq -r '.new' <(output) assert_output --partial '1.2.3.5' From e4cbed1807e60800656c613fe9f2089cbe6ae1e7 Mon Sep 17 00:00:00 2001 From: marco Date: Fri, 12 Jul 2024 14:05:48 +0200 Subject: [PATCH 088/119] detect os when in docker, report ex. osname = "Debian (docker)" --- cmd/crowdsec/lpmetrics.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/crowdsec/lpmetrics.go b/cmd/crowdsec/lpmetrics.go index afb3af99fe4..1ab19e8e8d6 100644 --- a/cmd/crowdsec/lpmetrics.go +++ b/cmd/crowdsec/lpmetrics.go @@ -89,15 +89,15 @@ func newStaticMetrics(consoleOptions []string, datasources []acquisition.DataSou } func detectOS() (string, string) { - if version.System == "docker" { - return "docker", "" - } - osInfo, err := osinfo.GetOSInfo() if err != nil { return version.System, "???" } + if osInfo.Name != "" && version.System == "docker" { + return osInfo.Name + " (docker)", osInfo.Version + } + return osInfo.Name, osInfo.Version } From 3365371a7d9f920d8d9ea73e21c49223c4ec466c Mon Sep 17 00:00:00 2001 From: marco Date: Fri, 12 Jul 2024 15:01:30 +0200 Subject: [PATCH 089/119] describe CAPI as CAPI (community blocklist) in table output only (not json) --- cmd/crowdsec-cli/climetrics/statbouncer.go | 6 +++ test/bats/08_metrics_bouncer.bats | 49 +++++++++++----------- 2 files changed, 30 insertions(+), 25 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 52afb89784e..3cca115328f 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -251,6 +251,12 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor for _, origin := range maptools.SortedKeys(s.aggregated[bouncerName]) { metrics := s.aggregated[bouncerName][origin] + + // some users don't know what capi is + if origin == 
"CAPI" { + origin += " (community blocklist)" + } + row := table.Row{origin} for _, name := range maptools.SortedKeys(columns) { for _, unit := range maptools.SortedKeys(columns[name]) { diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 8bd0ef106c8..c30ac9a87a3 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -209,7 +209,7 @@ teardown() { | | ip | byte | packet | dogyear | pound | +----------------------------------+------------------+---------+---------+---------+-------+ | | 0 | 0 | 0 | 2 | 5 | - | CAPI | 0 | 3.80k | 100 | 0 | 0 | + | CAPI (community blocklist) | 0 | 3.80k | 100 | 0 | 0 | | cscli | 1 | 380 | 10 | 0 | 0 | | lists:firehol_cruzit_web_attacks | 0 | 1.03k | 23 | 0 | 0 | | lists:firehol_voipbl | 51.94k | 3.85k | 58 | 0 | 0 | @@ -218,7 +218,7 @@ teardown() { +----------------------------------+------------------+---------+---------+---------+-------+ EOT - # TODO: multiple item lists, multiple bouncers + # TODO: multiple item lists } @@ -241,12 +241,12 @@ teardown() { unit: byte value: 1000 labels: - origin: capi + origin: CAPI - name: processed unit: packet value: 100 labels: - origin: capi + origin: CAPI - name: processed unit: packet value: 100 @@ -257,7 +257,6 @@ teardown() { rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" - API_KEY=$(cscli bouncers add bouncer2 -o raw) export API_KEY @@ -277,7 +276,7 @@ teardown() { unit: byte value: 2000 labels: - origin: capi + origin: CAPI - name: dropped unit: packet value: 20 @@ -287,30 +286,30 @@ teardown() { rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" rune -0 cscli metrics show bouncers -o json - assert_json '{bouncers:{bouncer1:{capi:{dropped:{byte:1000},processed:{packet:100}},"lists:somelist":{processed:{packet:100}}},bouncer2:{"":{dropped:{byte:1500,packet:20}},capi:{dropped:{byte:2000}}}}}' + assert_json 
'{bouncers:{bouncer1:{CAPI:{dropped:{byte:1000},processed:{packet:100}},"lists:somelist":{processed:{packet:100}}},bouncer2:{"":{dropped:{byte:1500,packet:20}},CAPI:{dropped:{byte:2000}}}}}' rune -0 cscli metrics show bouncers assert_output - <<-EOT Bouncer Metrics (bouncer1) since 2024-02-08 13:35:16 +0000 UTC: - +----------------+---------+-----------+ - | Origin | dropped | processed | - | | byte | packet | - +----------------+---------+-----------+ - | capi | 1.00k | 100 | - | lists:somelist | 0 | 100 | - +----------------+---------+-----------+ - | Total | 1.00k | 200 | - +----------------+---------+-----------+ + +----------------------------+---------+-----------+ + | Origin | dropped | processed | + | | byte | packet | + +----------------------------+---------+-----------+ + | CAPI (community blocklist) | 1.00k | 100 | + | lists:somelist | 0 | 100 | + +----------------------------+---------+-----------+ + | Total | 1.00k | 200 | + +----------------------------+---------+-----------+ Bouncer Metrics (bouncer2) since 2024-02-08 10:48:36 +0000 UTC: - +--------+-------------------+ - | Origin | dropped | - | | byte | packet | - +--------+---------+---------+ - | | 1.50k | 20 | - | capi | 2.00k | 0 | - +--------+---------+---------+ - | Total | 3.50k | 20 | - +--------+---------+---------+ + +----------------------------+-------------------+ + | Origin | dropped | + | | byte | packet | + +----------------------------+---------+---------+ + | | 1.50k | 20 | + | CAPI (community blocklist) | 2.00k | 0 | + +----------------------------+---------+---------+ + | Total | 3.50k | 20 | + +----------------------------+---------+---------+ EOT } From 5e075603f10f3976e6fcb42e0167f942addebe52 Mon Sep 17 00:00:00 2001 From: marco Date: Fri, 12 Jul 2024 15:45:42 +0200 Subject: [PATCH 090/119] display metrics without origin only in the total line --- cmd/crowdsec-cli/climetrics/statbouncer.go | 16 +++++--- test/bats/08_metrics_bouncer.bats | 44 +++++++++++----------- 2 files 
changed, 34 insertions(+), 26 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 3cca115328f..e0d43dc193b 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -250,6 +250,13 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor // sort all the ranges for stable output for _, origin := range maptools.SortedKeys(s.aggregated[bouncerName]) { + if origin == "" { + // if the metric has no origin (i.e. processed bytes/packets) + // we don't display it in the table body but it still gets aggreagted + // in the footer's totals + continue + } + metrics := s.aggregated[bouncerName][origin] // some users don't know what capi is @@ -270,6 +277,10 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor totals := s.aggregatedAllOrigin[bouncerName] + if numRows == 0 { + t.Style().Options.SeparateFooter = false + } + footer := table.Row{"Total"} for _, name := range maptools.SortedKeys(columns) { for _, unit := range maptools.SortedKeys(columns[name]) { @@ -279,11 +290,6 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor t.AppendFooter(footer) - if numRows == 0 { - // this happens only if we decide to discard some data in the loop - return - } - title, _ := s.Description() title = fmt.Sprintf("%s (%s)", title, bouncerName) if s.oldestTS != nil { diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index c30ac9a87a3..4d25b055548 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -124,8 +124,6 @@ teardown() { | Origin | foo | | | dogyear | pound | +--------+---------+-------+ - | | 2 | 5 | - +--------+---------+-------+ | Total | 2 | 5 | +--------+---------+-------+ EOT @@ -208,7 +206,6 @@ teardown() { | Origin | active_decisions | dropped | foo | | | ip | byte | packet | dogyear | pound | 
+----------------------------------+------------------+---------+---------+---------+-------+ - | | 0 | 0 | 0 | 2 | 5 | | CAPI (community blocklist) | 0 | 3.80k | 100 | 0 | 0 | | cscli | 1 | 380 | 10 | 0 | 0 | | lists:firehol_cruzit_web_attacks | 0 | 1.03k | 23 | 0 | 0 | @@ -242,16 +239,17 @@ teardown() { value: 1000 labels: origin: CAPI - - name: processed - unit: packet - value: 100 + - name: dropped + unit: byte + value: 800 labels: - origin: CAPI + origin: lists:somelist + - name: processed + unit: bytes + value: 12340 - name: processed unit: packet value: 100 - labels: - origin: lists:somelist EOT ) @@ -272,6 +270,8 @@ teardown() { - name: dropped unit: byte value: 1500 + labels: + origin: lists:somelist - name: dropped unit: byte value: 2000 @@ -280,34 +280,36 @@ teardown() { - name: dropped unit: packet value: 20 + labels: + origin: lists:somelist EOT ) rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" rune -0 cscli metrics show bouncers -o json - assert_json '{bouncers:{bouncer1:{CAPI:{dropped:{byte:1000},processed:{packet:100}},"lists:somelist":{processed:{packet:100}}},bouncer2:{"":{dropped:{byte:1500,packet:20}},CAPI:{dropped:{byte:2000}}}}}' + assert_json '{bouncers:{bouncer1:{"":{processed:{bytes:12340,packet:100}},CAPI:{dropped:{byte:1000}},"lists:somelist":{dropped:{byte:800}}},bouncer2:{"lists:somelist":{dropped:{byte:1500,packet:20}},CAPI:{dropped:{byte:2000}}}}}' rune -0 cscli metrics show bouncers assert_output - <<-EOT Bouncer Metrics (bouncer1) since 2024-02-08 13:35:16 +0000 UTC: - +----------------------------+---------+-----------+ - | Origin | dropped | processed | - | | byte | packet | - +----------------------------+---------+-----------+ - | CAPI (community blocklist) | 1.00k | 100 | - | lists:somelist | 0 | 100 | - +----------------------------+---------+-----------+ - | Total | 1.00k | 200 | - +----------------------------+---------+-----------+ - + +----------------------------+---------+-----------------------+ + | 
Origin | dropped | processed | + | | byte | bytes | packet | + +----------------------------+---------+-----------+-----------+ + | CAPI (community blocklist) | 1.00k | 0 | 0 | + | lists:somelist | 800 | 0 | 0 | + +----------------------------+---------+-----------+-----------+ + | Total | 1.80k | 12.34k | 100 | + +----------------------------+---------+-----------+-----------+ + Bouncer Metrics (bouncer2) since 2024-02-08 10:48:36 +0000 UTC: +----------------------------+-------------------+ | Origin | dropped | | | byte | packet | +----------------------------+---------+---------+ - | | 1.50k | 20 | | CAPI (community blocklist) | 2.00k | 0 | + | lists:somelist | 1.50k | 20 | +----------------------------+---------+---------+ | Total | 3.50k | 20 | +----------------------------+---------+---------+ From 8e4622f8862bb1c24b18cfcdc8449f5725514832 Mon Sep 17 00:00:00 2001 From: marco Date: Fri, 12 Jul 2024 16:08:25 +0200 Subject: [PATCH 091/119] display "-" for missing data (like per-origin processed bytes/packets) --- cmd/crowdsec-cli/climetrics/statbouncer.go | 7 +- test/bats/08_metrics_bouncer.bats | 97 ++++++++++++---------- 2 files changed, 59 insertions(+), 45 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index e0d43dc193b..33c2e82b8b4 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -267,7 +267,12 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor row := table.Row{origin} for _, name := range maptools.SortedKeys(columns) { for _, unit := range maptools.SortedKeys(columns[name]) { - row = append(row, formatNumber(metrics[name][unit], !noUnit)) + valStr := "-" + val, ok := metrics[name][unit] + if ok { + valStr = formatNumber(val, !noUnit) + } + row = append(row, valStr) } } t.AppendRow(row) diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 4d25b055548..402758652ee 
100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -147,6 +147,8 @@ teardown() { {"name": "dropped", "unit": "packet", "value": 10, "labels": {"ip_type": "ipv4", "origin": "cscli"}}, {"name": "dropped", "unit": "packet", "value": 23, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_cruzit_web_attacks"}}, {"name": "dropped", "unit": "packet", "value": 58, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_voipbl"}}, + {"name": "dropped", "unit": "packet", "value": 0, "labels": {"ip_type": "ipv4", "origin": "lists:anotherlist"}}, + {"name": "dropped", "unit": "byte", "value": 0, "labels": {"ip_type": "ipv4", "origin": "lists:anotherlist"}}, {"name": "dropped", "unit": "packet", "value": 0, "labels": {"ip_type": "ipv6", "origin": "cscli"}} ] } @@ -158,44 +160,50 @@ teardown() { rune -0 cscli metrics show bouncers -o json assert_json '{ "bouncers": { - "testbouncer": { - "": { - "foo": { - "dogyear": 2, - "pound": 5 - } - }, - "CAPI": { - "dropped": { - "byte": 3800, - "packet": 100 - } - }, - "cscli": { - "active_decisions": { - "ip": 1 - }, - "dropped": { - "byte": 380, - "packet": 10 - } - }, - "lists:firehol_cruzit_web_attacks": { - "dropped": { - "byte": 1034, - "packet": 23 - } - }, - "lists:firehol_voipbl": { - "active_decisions": { - "ip": 51936 - }, - "dropped": { - "byte": 3847, - "packet": 58 - } + "testbouncer": { + "": { + "foo": { + "dogyear": 2, + "pound": 5 + } + }, + "CAPI": { + "dropped": { + "byte": 3800, + "packet": 100 + } + }, + "cscli": { + "active_decisions": { + "ip": 1 + }, + "dropped": { + "byte": 380, + "packet": 10 + } + }, + "lists:firehol_cruzit_web_attacks": { + "dropped": { + "byte": 1034, + "packet": 23 + } + }, + "lists:firehol_voipbl": { + "active_decisions": { + "ip": 51936 + }, + "dropped": { + "byte": 3847, + "packet": 58 + }, + }, + "lists:anotherlist": { + "dropped": { + "byte": 0, + "packet": 0 + } + } } - } } }' @@ -206,10 +214,11 @@ teardown() { | Origin | active_decisions | 
dropped | foo | | | ip | byte | packet | dogyear | pound | +----------------------------------+------------------+---------+---------+---------+-------+ - | CAPI (community blocklist) | 0 | 3.80k | 100 | 0 | 0 | - | cscli | 1 | 380 | 10 | 0 | 0 | - | lists:firehol_cruzit_web_attacks | 0 | 1.03k | 23 | 0 | 0 | - | lists:firehol_voipbl | 51.94k | 3.85k | 58 | 0 | 0 | + | CAPI (community blocklist) | - | 3.80k | 100 | - | - | + | cscli | 1 | 380 | 10 | - | - | + | lists:anotherlist | - | 0 | 0 | - | - | + | lists:firehol_cruzit_web_attacks | - | 1.03k | 23 | - | - | + | lists:firehol_voipbl | 51.94k | 3.85k | 58 | - | - | +----------------------------------+------------------+---------+---------+---------+-------+ | Total | 51.94k | 9.06k | 191 | 2 | 5 | +----------------------------------+------------------+---------+---------+---------+-------+ @@ -297,8 +306,8 @@ teardown() { | Origin | dropped | processed | | | byte | bytes | packet | +----------------------------+---------+-----------+-----------+ - | CAPI (community blocklist) | 1.00k | 0 | 0 | - | lists:somelist | 800 | 0 | 0 | + | CAPI (community blocklist) | 1.00k | - | - | + | lists:somelist | 800 | - | - | +----------------------------+---------+-----------+-----------+ | Total | 1.80k | 12.34k | 100 | +----------------------------+---------+-----------+-----------+ @@ -308,7 +317,7 @@ teardown() { | Origin | dropped | | | byte | packet | +----------------------------+---------+---------+ - | CAPI (community blocklist) | 2.00k | 0 | + | CAPI (community blocklist) | 2.00k | - | | lists:somelist | 1.50k | 20 | +----------------------------+---------+---------+ | Total | 3.50k | 20 | From e8dd87a4941553ddd3fe8665737665b7e9bf2b14 Mon Sep 17 00:00:00 2001 From: marco Date: Fri, 12 Jul 2024 16:18:09 +0200 Subject: [PATCH 092/119] pluralize known unit names --- cmd/crowdsec-cli/climetrics/statbouncer.go | 12 ++++++++++++ test/bats/08_metrics_bouncer.bats | 10 +++++----- 2 files changed, 17 insertions(+), 5 
deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 33c2e82b8b4..0105e345313 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -39,6 +39,12 @@ type statBouncer struct { aggregatedAllOrigin map[string]map[string]map[string]int64 } +var knownPlurals = map[string]string{ + "byte": "bytes", + "packet": "packets", + "ip": "IPs", +} + func (s *statBouncer) MarshalJSON() ([]byte, error) { return json.Marshal(s.aggregated) } @@ -230,6 +236,12 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor for _, unit := range maptools.SortedKeys(columns[name]) { colNum += 1 header1 = append(header1, name) + + // we don't add "s" to random words + if knownPlurals[unit] != "" { + unit = knownPlurals[unit] + } + header2 = append(header2, unit) colCfg = append(colCfg, table.ColumnConfig{ Number: colNum, diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 402758652ee..f477bbdb3c2 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -212,7 +212,7 @@ teardown() { Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC: +----------------------------------+------------------+-------------------+-----------------+ | Origin | active_decisions | dropped | foo | - | | ip | byte | packet | dogyear | pound | + | | IPs | bytes | packets | dogyear | pound | +----------------------------------+------------------+---------+---------+---------+-------+ | CAPI (community blocklist) | - | 3.80k | 100 | - | - | | cscli | 1 | 380 | 10 | - | - | @@ -254,7 +254,7 @@ teardown() { labels: origin: lists:somelist - name: processed - unit: bytes + unit: byte value: 12340 - name: processed unit: packet @@ -297,14 +297,14 @@ teardown() { rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" rune -0 cscli metrics show bouncers -o json - assert_json 
'{bouncers:{bouncer1:{"":{processed:{bytes:12340,packet:100}},CAPI:{dropped:{byte:1000}},"lists:somelist":{dropped:{byte:800}}},bouncer2:{"lists:somelist":{dropped:{byte:1500,packet:20}},CAPI:{dropped:{byte:2000}}}}}' + assert_json '{bouncers:{bouncer1:{"":{processed:{byte:12340,packet:100}},CAPI:{dropped:{byte:1000}},"lists:somelist":{dropped:{byte:800}}},bouncer2:{"lists:somelist":{dropped:{byte:1500,packet:20}},CAPI:{dropped:{byte:2000}}}}}' rune -0 cscli metrics show bouncers assert_output - <<-EOT Bouncer Metrics (bouncer1) since 2024-02-08 13:35:16 +0000 UTC: +----------------------------+---------+-----------------------+ | Origin | dropped | processed | - | | byte | bytes | packet | + | | bytes | bytes | packets | +----------------------------+---------+-----------+-----------+ | CAPI (community blocklist) | 1.00k | - | - | | lists:somelist | 800 | - | - | @@ -315,7 +315,7 @@ teardown() { Bouncer Metrics (bouncer2) since 2024-02-08 10:48:36 +0000 UTC: +----------------------------+-------------------+ | Origin | dropped | - | | byte | packet | + | | bytes | packets | +----------------------------+---------+---------+ | CAPI (community blocklist) | 2.00k | - | | lists:somelist | 1.50k | 20 | From d671a67745845d04878c6de8351c8072ac6ea1b4 Mon Sep 17 00:00:00 2001 From: marco Date: Mon, 15 Jul 2024 09:58:27 +0200 Subject: [PATCH 093/119] enable 08_* func tests --- test/bats/08_metrics_bouncer.bats | 1 + test/bats/08_metrics_machines.bats | 1 + 2 files changed, 2 insertions(+) diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index f477bbdb3c2..778452644dd 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -15,6 +15,7 @@ setup() { load "../lib/setup.sh" ./instance-data load ./instance-crowdsec start + skip "require the usage_metrics endpoint on apiserver" } teardown() { diff --git a/test/bats/08_metrics_machines.bats b/test/bats/08_metrics_machines.bats index 3b73839e753..e63078124a9 100644 
--- a/test/bats/08_metrics_machines.bats +++ b/test/bats/08_metrics_machines.bats @@ -15,6 +15,7 @@ setup() { load "../lib/setup.sh" ./instance-data load ./instance-crowdsec start + skip "require the usage_metrics endpoint on apiserver" } teardown() { From facc26d08a2b779f136139bb4b5cbaf0564bd776 Mon Sep 17 00:00:00 2001 From: marco Date: Mon, 15 Jul 2024 09:59:34 +0200 Subject: [PATCH 094/119] comments --- cmd/crowdsec-cli/climetrics/show.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/show.go b/cmd/crowdsec-cli/climetrics/show.go index b567a911107..7559463b66b 100644 --- a/cmd/crowdsec-cli/climetrics/show.go +++ b/cmd/crowdsec-cli/climetrics/show.go @@ -35,11 +35,8 @@ func (cli *cliMetrics) show(ctx context.Context, sections []string, url string, ms := NewMetricStore() - // XXX: only on lapi db, err := require.DBClient(ctx, cfg.DbConfig) if err != nil { - // XXX how to handle this - if we are not on lapi, etc. - // we may read lp metrics without lapi? 
log.Warnf("unable to open database: %s", err) } From 308b29bdb0d48ff8bf1be0ef22370b35cf798752 Mon Sep 17 00:00:00 2001 From: marco Date: Mon, 15 Jul 2024 13:14:54 +0200 Subject: [PATCH 095/119] use version.DetectOS() from go-cs-lib --- cmd/crowdsec/lpmetrics.go | 18 ++---------------- go.mod | 2 +- go.sum | 4 ++-- pkg/apiserver/apic_metrics.go | 16 +--------------- pkg/cwversion/version.go | 15 ++------------- 5 files changed, 8 insertions(+), 47 deletions(-) diff --git a/cmd/crowdsec/lpmetrics.go b/cmd/crowdsec/lpmetrics.go index 1ab19e8e8d6..db00f36a068 100644 --- a/cmd/crowdsec/lpmetrics.go +++ b/cmd/crowdsec/lpmetrics.go @@ -6,20 +6,19 @@ import ( "net/http" "time" - "github.com/blackfireio/osinfo" "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/go-cs-lib/version" "github.com/crowdsecurity/crowdsec/pkg/acquisition" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/fflag" "github.com/crowdsecurity/crowdsec/pkg/models" - "github.com/crowdsecurity/go-cs-lib/version" ) // MetricsProvider collects metrics from the LP and sends them to the LAPI @@ -75,7 +74,7 @@ func newStaticMetrics(consoleOptions []string, datasources []acquisition.DataSou datasourceMap[ds.GetName()] += 1 } - osName, osVersion := detectOS() + osName, osVersion := version.DetectOS() return staticMetrics{ osName: osName, @@ -88,19 +87,6 @@ func newStaticMetrics(consoleOptions []string, datasources []acquisition.DataSou } } -func detectOS() (string, string) { - osInfo, err := osinfo.GetOSInfo() - if err != nil { - return version.System, "???" 
- } - - if osInfo.Name != "" && version.System == "docker" { - return osInfo.Name + " (docker)", osInfo.Version - } - - return osInfo.Name, osInfo.Version -} - func NewMetricsProvider(apic *apiclient.ApiClient, interval time.Duration, logger *logrus.Entry, consoleOptions []string, datasources []acquisition.DataSource, hub *cwhub.Hub) *MetricsProvider { return &MetricsProvider{ diff --git a/go.mod b/go.mod index 1bc63a470df..6a2146efc5f 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/corazawaf/libinjection-go v0.1.2 github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 - github.com/crowdsecurity/go-cs-lib v0.0.11 + github.com/crowdsecurity/go-cs-lib v0.0.13 github.com/crowdsecurity/grokky v0.2.1 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index ba4e6267bb9..faca9797341 100644 --- a/go.sum +++ b/go.sum @@ -105,8 +105,8 @@ github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= -github.com/crowdsecurity/go-cs-lib v0.0.11 h1:ygUOKrkMLaJ2wjC020LgtY6XDkToNFK4NmYlhpkk5ko= -github.com/crowdsecurity/go-cs-lib v0.0.11/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k= +github.com/crowdsecurity/go-cs-lib v0.0.13 h1:asmtjIEPOibUK8eaYQCIR7XIBU/EX5vyAp1EbKFQJtY= +github.com/crowdsecurity/go-cs-lib v0.0.13/go.mod h1:ePyQyJBxp1W/1bq4YpVAilnLSz7HkzmtI7TRhX187EU= github.com/crowdsecurity/grokky v0.2.1 h1:t4VYnDlAd0RjDM2SlILalbwfCrQxtJSMGdQOR0zwkE4= github.com/crowdsecurity/grokky v0.2.1/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= 
github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 3279432973a..817a62a8dac 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -8,7 +8,6 @@ import ( "strings" "time" - "github.com/blackfireio/osinfo" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/go-cs-lib/ptr" @@ -24,19 +23,6 @@ type dbPayload struct { Metrics []*models.DetailedMetrics `json:"metrics"` } -func detectOS() (string, string) { - if version.System == "docker" { - return "docker", "" - } - - osInfo, err := osinfo.GetOSInfo() - if err != nil { - return version.System, "???" - } - - return osInfo.Name, osInfo.Version -} - func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { allMetrics := &models.AllMetrics{} metricsIds := make([]int, 0) @@ -159,7 +145,7 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { }, } - osName, osVersion := detectOS() + osName, osVersion := version.DetectOS() allMetrics.Lapi.Os = &models.OSversion{ Name: ptr.Of(osName), diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go index 01509833c1c..53990fb23d6 100644 --- a/pkg/cwversion/version.go +++ b/pkg/cwversion/version.go @@ -21,19 +21,8 @@ const ( Constraint_acquis = ">= 1.0, < 2.0" ) -func versionWithTag() string { - // if the version number already contains the tag, don't duplicate it - ret := version.Version - - if !strings.HasSuffix(ret, version.Tag) && !strings.HasSuffix(ret, "g"+version.Tag+"-dirty") { - ret += "-" + version.Tag - } - - return ret -} - func FullString() string { - ret := fmt.Sprintf("version: %s\n", versionWithTag()) + ret := fmt.Sprintf("version: %s\n", version.String()) ret += fmt.Sprintf("Codename: %s\n", Codename) ret += fmt.Sprintf("BuildDate: %s\n", version.BuildDate) ret += fmt.Sprintf("GoVersion: %s\n", version.GoVersion) @@ -49,7 +38,7 @@ func FullString() string { } func UserAgent() string 
{ - return "crowdsec/" + versionWithTag() + "-" + version.System + return "crowdsec/" + version.String() + "-" + version.System } // VersionStrip remove the tag from the version string, used to match with a hub branch From 1f8e1dadccb5e22edf212e1a643d51335ff8715b Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Mon, 15 Jul 2024 13:42:09 +0200 Subject: [PATCH 096/119] move metrics tomb to global for now --- cmd/crowdsec/crowdsec.go | 3 --- cmd/crowdsec/main.go | 15 ++++++++------- cmd/crowdsec/serve.go | 7 +++++++ 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 601fac6ab08..0037da6a026 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -8,7 +8,6 @@ import ( "time" log "github.com/sirupsen/logrus" - "gopkg.in/tomb.v2" "github.com/crowdsecurity/go-cs-lib/trace" @@ -150,8 +149,6 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H hub, ) - lpMetricsTomb := tomb.Tomb{} - lpMetricsTomb.Go(func() error { // XXX: context? 
return mp.Run(context.Background(), &lpMetricsTomb) diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 26e39eb069c..32cab79083f 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -30,13 +30,14 @@ import ( var ( /*tombs for the parser, buckets and outputs.*/ - acquisTomb tomb.Tomb - parsersTomb tomb.Tomb - bucketsTomb tomb.Tomb - outputsTomb tomb.Tomb - apiTomb tomb.Tomb - crowdsecTomb tomb.Tomb - pluginTomb tomb.Tomb + acquisTomb tomb.Tomb + parsersTomb tomb.Tomb + bucketsTomb tomb.Tomb + outputsTomb tomb.Tomb + apiTomb tomb.Tomb + crowdsecTomb tomb.Tomb + pluginTomb tomb.Tomb + lpMetricsTomb tomb.Tomb flags *Flags diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index 5fb7b86f181..b28fc019460 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -179,6 +179,13 @@ func ShutdownCrowdsecRoutines() error { log.Warningf("Outputs didn't finish in time, some events may have not been flushed") } + lpMetricsTomb.Kill(nil) + + if err := lpMetricsTomb.Wait(); err != nil { + log.Warningf("Metrics returned error : %s", err) + reterr = err + } + // He's dead, Jim. 
crowdsecTomb.Kill(nil) From b865eec1484aad24dde5bce23d4bc25f4eccfaad Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Mon, 15 Jul 2024 14:30:05 +0200 Subject: [PATCH 097/119] remove configuration option for metrics interval --- cmd/crowdsec/crowdsec.go | 2 +- cmd/crowdsec/lpmetrics.go | 2 + pkg/apiserver/apic.go | 72 +++++++++++++++------------ pkg/apiserver/apic_metrics.go | 6 +-- pkg/csconfig/crowdsec_service.go | 24 --------- pkg/csconfig/crowdsec_service_test.go | 21 ++++---- 6 files changed, 54 insertions(+), 73 deletions(-) diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 0037da6a026..420e4b93d95 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -142,7 +142,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H mp := NewMetricsProvider( apiClient, - *cConfig.Crowdsec.MetricsInterval, + lpMetricsDefaultInterval, log.WithField("service", "lpmetrics"), cConfig.API.Server.ConsoleConfig.EnabledOptions(), datasources, diff --git a/cmd/crowdsec/lpmetrics.go b/cmd/crowdsec/lpmetrics.go index db00f36a068..f56daf51e0f 100644 --- a/cmd/crowdsec/lpmetrics.go +++ b/cmd/crowdsec/lpmetrics.go @@ -21,6 +21,8 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) +const lpMetricsDefaultInterval = 30 * time.Minute + // MetricsProvider collects metrics from the LP and sends them to the LAPI type MetricsProvider struct { apic *apiclient.ApiClient diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 68dc94367e2..284d0acdabf 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -35,26 +35,30 @@ import ( const ( // delta values must be smaller than the interval - pullIntervalDefault = time.Hour * 2 - pullIntervalDelta = 5 * time.Minute - pushIntervalDefault = time.Second * 10 - pushIntervalDelta = time.Second * 7 - metricsIntervalDefault = time.Minute * 30 - metricsIntervalDelta = time.Minute * 15 + pullIntervalDefault = time.Hour * 2 + pullIntervalDelta = 5 * time.Minute + 
pushIntervalDefault = time.Second * 10 + pushIntervalDelta = time.Second * 7 + metricsIntervalDefault = time.Minute * 30 + metricsIntervalDelta = time.Minute * 15 + usageMetricsInterval = time.Minute * 30 + usageMetricsIntervalFirst = time.Minute * 15 ) type apic struct { // when changing the intervals in tests, always set *First too // or they can be negative - pullInterval time.Duration - pullIntervalFirst time.Duration - pushInterval time.Duration - pushIntervalFirst time.Duration - metricsInterval time.Duration - metricsIntervalFirst time.Duration - dbClient *database.Client - apiClient *apiclient.ApiClient - AlertsAddChan chan []*models.Alert + pullInterval time.Duration + pullIntervalFirst time.Duration + pushInterval time.Duration + pushIntervalFirst time.Duration + metricsInterval time.Duration + metricsIntervalFirst time.Duration + usageMetricsInterval time.Duration + usageMetricsIntervalFirst time.Duration + dbClient *database.Client + apiClient *apiclient.ApiClient + AlertsAddChan chan []*models.Alert mu sync.Mutex pushTomb tomb.Tomb @@ -175,24 +179,26 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, con var err error ret := &apic{ - AlertsAddChan: make(chan []*models.Alert), - dbClient: dbClient, - mu: sync.Mutex{}, - startup: true, - credentials: config.Credentials, - pullTomb: tomb.Tomb{}, - pushTomb: tomb.Tomb{}, - metricsTomb: tomb.Tomb{}, - scenarioList: make([]string, 0), - consoleConfig: consoleConfig, - pullInterval: pullIntervalDefault, - pullIntervalFirst: randomDuration(pullIntervalDefault, pullIntervalDelta), - pushInterval: pushIntervalDefault, - pushIntervalFirst: randomDuration(pushIntervalDefault, pushIntervalDelta), - metricsInterval: metricsIntervalDefault, - metricsIntervalFirst: randomDuration(metricsIntervalDefault, metricsIntervalDelta), - isPulling: make(chan bool, 1), - whitelists: apicWhitelist, + AlertsAddChan: make(chan []*models.Alert), + dbClient: dbClient, + mu: sync.Mutex{}, + startup: true, 
+ credentials: config.Credentials, + pullTomb: tomb.Tomb{}, + pushTomb: tomb.Tomb{}, + metricsTomb: tomb.Tomb{}, + scenarioList: make([]string, 0), + consoleConfig: consoleConfig, + pullInterval: pullIntervalDefault, + pullIntervalFirst: randomDuration(pullIntervalDefault, pullIntervalDelta), + pushInterval: pushIntervalDefault, + pushIntervalFirst: randomDuration(pushIntervalDefault, pushIntervalDelta), + metricsInterval: metricsIntervalDefault, + metricsIntervalFirst: randomDuration(metricsIntervalDefault, metricsIntervalDelta), + usageMetricsInterval: usageMetricsInterval, + usageMetricsIntervalFirst: randomDuration(usageMetricsInterval, usageMetricsIntervalFirst), + isPulling: make(chan bool, 1), + whitelists: apicWhitelist, } password := strfmt.Password(config.Credentials.Password) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 817a62a8dac..e39d66b772a 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -332,7 +332,7 @@ func (a *apic) SendUsageMetrics() { firstRun := true - ticker := time.NewTicker(time.Millisecond) + ticker := time.NewTicker(a.usageMetricsIntervalFirst) for { select { @@ -343,7 +343,7 @@ func (a *apic) SendUsageMetrics() { case <-ticker.C: if firstRun { firstRun = false - ticker.Reset(30 * time.Minute) + ticker.Reset(a.usageMetricsInterval) } metrics, metricsId, err := a.GetUsageMetrics() if err != nil { @@ -365,7 +365,7 @@ func (a *apic) SendUsageMetrics() { log.Errorf("unable to mark usage metrics as sent: %s", err) continue } - log.Infof("Usage metrics sent") + log.Infof("Sent %d usage metrics", len(metricsId)) } } } diff --git a/pkg/csconfig/crowdsec_service.go b/pkg/csconfig/crowdsec_service.go index 3acb53763e5..7820595b46f 100644 --- a/pkg/csconfig/crowdsec_service.go +++ b/pkg/csconfig/crowdsec_service.go @@ -4,7 +4,6 @@ import ( "fmt" "os" "path/filepath" - "time" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" @@ -12,11 +11,6 @@ import ( 
"github.com/crowdsecurity/go-cs-lib/ptr" ) -const ( - defaultMetricsInterval = 30 * time.Minute - minimumMetricsInterval = 15 * time.Minute -) - // CrowdsecServiceCfg contains the location of parsers/scenarios/... and acquisition files type CrowdsecServiceCfg struct { Enable *bool `yaml:"enable"` @@ -32,7 +26,6 @@ type CrowdsecServiceCfg struct { BucketStateFile string `yaml:"state_input_file,omitempty"` // if we need to unserialize buckets at start BucketStateDumpDir string `yaml:"state_output_dir,omitempty"` // if we need to unserialize buckets on shutdown BucketsGCEnabled bool `yaml:"-"` // we need to garbage collect buckets when in forensic mode - MetricsInterval *time.Duration `yaml:"metrics_interval,omitempty"` SimulationFilePath string `yaml:"-"` ContextToSend map[string][]string `yaml:"-"` @@ -139,8 +132,6 @@ func (c *Config) LoadCrowdsec() error { c.Crowdsec.AcquisitionFiles[i] = f } - c.Crowdsec.setMetricsInterval() - if err = c.LoadAPIClient(); err != nil { return fmt.Errorf("loading api client: %w", err) } @@ -148,21 +139,6 @@ func (c *Config) LoadCrowdsec() error { return nil } -func (c *CrowdsecServiceCfg) setMetricsInterval() { - switch { - case c.MetricsInterval == nil: - log.Tracef("metrics_interval is not set, default to %s", defaultMetricsInterval) - c.MetricsInterval = ptr.Of(defaultMetricsInterval) - case *c.MetricsInterval == time.Duration(0): - log.Info("metrics_interval is set to 0, disabling metrics") - case *c.MetricsInterval < minimumMetricsInterval: - log.Warnf("metrics_interval is too low (%s), setting it to %s", *c.MetricsInterval, minimumMetricsInterval) - c.MetricsInterval = ptr.Of(minimumMetricsInterval) - default: - log.Tracef("metrics_interval set to %s", c.MetricsInterval) - } -} - func (c *CrowdsecServiceCfg) DumpContextConfigFile() error { // XXX: MakeDirs out, err := yaml.Marshal(c.ContextToSend) diff --git a/pkg/csconfig/crowdsec_service_test.go b/pkg/csconfig/crowdsec_service_test.go index a7e4121127b..72241526679 100644 --- 
a/pkg/csconfig/crowdsec_service_test.go +++ b/pkg/csconfig/crowdsec_service_test.go @@ -58,13 +58,12 @@ func TestLoadCrowdsec(t *testing.T) { ParserRoutinesCount: 1, OutputRoutinesCount: 1, ConsoleContextValueLength: 2500, - MetricsInterval: ptr.Of(defaultMetricsInterval), AcquisitionFiles: []string{acquisFullPath}, SimulationFilePath: "./testdata/simulation.yaml", // context is loaded in pkg/alertcontext -// ContextToSend: map[string][]string{ -// "source_ip": {"evt.Parsed.source_ip"}, -// }, + // ContextToSend: map[string][]string{ + // "source_ip": {"evt.Parsed.source_ip"}, + // }, SimulationConfig: &SimulationConfig{ Simulation: ptr.Of(false), }, @@ -99,12 +98,11 @@ func TestLoadCrowdsec(t *testing.T) { ParserRoutinesCount: 1, OutputRoutinesCount: 1, ConsoleContextValueLength: 0, - MetricsInterval: ptr.Of(defaultMetricsInterval), AcquisitionFiles: []string{acquisFullPath, acquisInDirFullPath}, // context is loaded in pkg/alertcontext -// ContextToSend: map[string][]string{ -// "source_ip": {"evt.Parsed.source_ip"}, -// }, + // ContextToSend: map[string][]string{ + // "source_ip": {"evt.Parsed.source_ip"}, + // }, SimulationFilePath: "./testdata/simulation.yaml", SimulationConfig: &SimulationConfig{ Simulation: ptr.Of(false), @@ -138,13 +136,12 @@ func TestLoadCrowdsec(t *testing.T) { ParserRoutinesCount: 1, OutputRoutinesCount: 1, ConsoleContextValueLength: 10, - MetricsInterval: ptr.Of(defaultMetricsInterval), AcquisitionFiles: []string{}, SimulationFilePath: "", // context is loaded in pkg/alertcontext -// ContextToSend: map[string][]string{ -// "source_ip": {"evt.Parsed.source_ip"}, -// }, + // ContextToSend: map[string][]string{ + // "source_ip": {"evt.Parsed.source_ip"}, + // }, SimulationConfig: &SimulationConfig{ Simulation: ptr.Of(false), }, From 8990cd8d23db79ef098edd88014e1a230d7c2ca4 Mon Sep 17 00:00:00 2001 From: marco Date: Mon, 15 Jul 2024 15:15:20 +0200 Subject: [PATCH 098/119] fix usage_metrics endpoint to work with cert auth, add some tests --- 
pkg/apiserver/controllers/controller.go | 13 +++++++--- pkg/apiserver/controllers/v1/usagemetrics.go | 7 +++++ test/bats/08_metrics_bouncer.bats | 1 - test/bats/08_metrics_machines.bats | 1 - test/bats/10_bouncers.bats | 18 +++++++++++++ test/bats/11_bouncers_tls.bats | 27 ++++++++++++++++++++ 6 files changed, 62 insertions(+), 5 deletions(-) diff --git a/pkg/apiserver/controllers/controller.go b/pkg/apiserver/controllers/controller.go index 441dccbe912..8175f431384 100644 --- a/pkg/apiserver/controllers/controller.go +++ b/pkg/apiserver/controllers/controller.go @@ -4,6 +4,7 @@ import ( "context" "net" "net/http" + "strings" "github.com/alexliesenfeld/health" "github.com/gin-gonic/gin" @@ -61,11 +62,17 @@ func serveHealth() http.HandlerFunc { func eitherAuthMiddleware(jwtMiddleware gin.HandlerFunc, apiKeyMiddleware gin.HandlerFunc) gin.HandlerFunc { return func(c *gin.Context) { - // XXX: what when there's no api key for a RC? - if c.GetHeader("X-Api-Key") != "" { + switch { + case c.GetHeader("X-Api-Key") != "": apiKeyMiddleware(c) - } else { + case c.GetHeader("Authorization") != "": jwtMiddleware(c) + // uh no auth header. is this TLS with mutual authentication? + case strings.HasPrefix(c.Request.UserAgent(), "crowdsec/"): + // guess log processors by sniffing user-agent + jwtMiddleware(c) + default: + apiKeyMiddleware(c) } } } diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go index b0453beface..52db93a4c4a 100644 --- a/pkg/apiserver/controllers/v1/usagemetrics.go +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -75,6 +75,13 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { generatedBy = machineID } + if generatedBy == "" { + // how did we get here? 
+ logger.Error("No machineID or bouncer in request context after authentication") + gctx.JSON(http.StatusInternalServerError, gin.H{"message": "No machineID or bouncer in request context after authentication"}) + return + } + if machineID != "" && bouncer != nil { logger.Errorf("Payload has both machineID and bouncer") gctx.JSON(http.StatusBadRequest, gin.H{"message": "Payload has both LP and RC data"}) diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 778452644dd..f477bbdb3c2 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -15,7 +15,6 @@ setup() { load "../lib/setup.sh" ./instance-data load ./instance-crowdsec start - skip "require the usage_metrics endpoint on apiserver" } teardown() { diff --git a/test/bats/08_metrics_machines.bats b/test/bats/08_metrics_machines.bats index e63078124a9..3b73839e753 100644 --- a/test/bats/08_metrics_machines.bats +++ b/test/bats/08_metrics_machines.bats @@ -15,7 +15,6 @@ setup() { load "../lib/setup.sh" ./instance-data load ./instance-crowdsec start - skip "require the usage_metrics endpoint on apiserver" } teardown() { diff --git a/test/bats/10_bouncers.bats b/test/bats/10_bouncers.bats index b6efbd06650..a89c9f9dd65 100644 --- a/test/bats/10_bouncers.bats +++ b/test/bats/10_bouncers.bats @@ -42,6 +42,24 @@ teardown() { assert_json '[]' } +@test "bouncer api-key auth" { + rune -0 cscli bouncers add ciTestBouncer --key "goodkey" + + # connect with good credentials + rune -0 curl-tcp "/v1/decisions" -sS --fail-with-body -H "X-Api-Key: goodkey" + assert_output null + + # connect with bad credentials + rune -22 curl-tcp "/v1/decisions" -sS --fail-with-body -H "X-Api-Key: badkey" + assert_stderr --partial 'error: 403' + assert_json '{message:"access forbidden"}' + + # connect with no credentials + rune -22 curl-tcp "/v1/decisions" -sS --fail-with-body + assert_stderr --partial 'error: 403' + assert_json '{message:"access forbidden"}' +} + @test "bouncers 
delete has autocompletion" { rune -0 cscli bouncers add foo1 rune -0 cscli bouncers add foo2 diff --git a/test/bats/11_bouncers_tls.bats b/test/bats/11_bouncers_tls.bats index 849b3a5b35c..ca916f394aa 100644 --- a/test/bats/11_bouncers_tls.bats +++ b/test/bats/11_bouncers_tls.bats @@ -162,6 +162,33 @@ teardown() { rune cscli bouncers delete localhost@127.0.0.1 } +@test "a bouncer authenticated with TLS can send metrics" { + payload=$(yq -o j <<-EOT + remediation_components: [] + log_processors: [] + EOT + ) + + # with mutual authentication there is no api key, so it's detected as RC if user agent != crowdsec + + rune -22 curl --fail-with-body -sS \ + --cert "$tmpdir/leaf.pem" \ + --key "$tmpdir/leaf-key.pem" \ + --cacert "$tmpdir/bundle.pem" \ + https://localhost:8080/v1/usage-metrics -X POST --data "$payload" + assert_stderr --partial 'error: 400' + assert_json '{message: "Missing remediation component data"}' + + rune -22 curl --fail-with-body -sS \ + --cert "$tmpdir/leaf.pem" \ + --key "$tmpdir/leaf-key.pem" \ + --cacert "$tmpdir/bundle.pem" \ + --user-agent "crowdsec/someversion" \ + https://localhost:8080/v1/usage-metrics -X POST --data "$payload" + assert_stderr --partial 'error: 401' + assert_json '{code:401, message: "cookie token is empty"}' +} + @test "simulate a bouncer request with an invalid cert" { rune -77 curl --fail-with-body -sS \ --cert "$tmpdir/leaf_invalid.pem" \ From 259d202f21ca63f6c7e435042295631946dc814c Mon Sep 17 00:00:00 2001 From: marco Date: Mon, 15 Jul 2024 15:27:13 +0200 Subject: [PATCH 099/119] comments, lint --- cmd/crowdsec/crowdsec.go | 1 - cmd/crowdsec/lpmetrics.go | 4 ---- pkg/apiserver/controllers/v1/errors_test.go | 2 +- pkg/csconfig/crowdsec_service_test.go | 18 +++++++++--------- pkg/database/bouncers.go | 2 +- pkg/database/flush.go | 2 +- pkg/database/metrics.go | 6 ------ 7 files changed, 12 insertions(+), 23 deletions(-) diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 420e4b93d95..1de885027e0 
100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -150,7 +150,6 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H ) lpMetricsTomb.Go(func() error { - // XXX: context? return mp.Run(context.Background(), &lpMetricsTomb) }) diff --git a/cmd/crowdsec/lpmetrics.go b/cmd/crowdsec/lpmetrics.go index f56daf51e0f..fd0249bd03f 100644 --- a/cmd/crowdsec/lpmetrics.go +++ b/cmd/crowdsec/lpmetrics.go @@ -127,8 +127,6 @@ func (m *MetricsProvider) metricsPayload() *models.AllMetrics { Items: make([]*models.MetricsDetailItem, 0), }) - // TODO: more metric details... ? - return &models.AllMetrics{ LogProcessors: []*models.LogProcessorsMetrics{met}, } @@ -148,8 +146,6 @@ func (m *MetricsProvider) Run(ctx context.Context, myTomb *tomb.Tomb) error { for { select { case <-ticker.C: - //.UtcNowTimestamp = ptr.Of(time.Now().Unix()) - ctxTime, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() diff --git a/pkg/apiserver/controllers/v1/errors_test.go b/pkg/apiserver/controllers/v1/errors_test.go index 93fa956bd15..da962f127c3 100644 --- a/pkg/apiserver/controllers/v1/errors_test.go +++ b/pkg/apiserver/controllers/v1/errors_test.go @@ -15,7 +15,7 @@ func TestCollapseRepeatedPrefix(t *testing.T) { want string }{ { - input: "aaabbbcccaaa", + input: "aaabbbcccaaa", prefix: "aaa", want: "aaabbbcccaaa"}, { diff --git a/pkg/csconfig/crowdsec_service_test.go b/pkg/csconfig/crowdsec_service_test.go index 72241526679..8ce084807c6 100644 --- a/pkg/csconfig/crowdsec_service_test.go +++ b/pkg/csconfig/crowdsec_service_test.go @@ -61,9 +61,9 @@ func TestLoadCrowdsec(t *testing.T) { AcquisitionFiles: []string{acquisFullPath}, SimulationFilePath: "./testdata/simulation.yaml", // context is loaded in pkg/alertcontext - // ContextToSend: map[string][]string{ - // "source_ip": {"evt.Parsed.source_ip"}, - // }, + // ContextToSend: map[string][]string{ + // "source_ip": {"evt.Parsed.source_ip"}, + // }, SimulationConfig: &SimulationConfig{ 
Simulation: ptr.Of(false), }, @@ -100,9 +100,9 @@ func TestLoadCrowdsec(t *testing.T) { ConsoleContextValueLength: 0, AcquisitionFiles: []string{acquisFullPath, acquisInDirFullPath}, // context is loaded in pkg/alertcontext - // ContextToSend: map[string][]string{ - // "source_ip": {"evt.Parsed.source_ip"}, - // }, + // ContextToSend: map[string][]string{ + // "source_ip": {"evt.Parsed.source_ip"}, + // }, SimulationFilePath: "./testdata/simulation.yaml", SimulationConfig: &SimulationConfig{ Simulation: ptr.Of(false), @@ -139,9 +139,9 @@ func TestLoadCrowdsec(t *testing.T) { AcquisitionFiles: []string{}, SimulationFilePath: "", // context is loaded in pkg/alertcontext - // ContextToSend: map[string][]string{ - // "source_ip": {"evt.Parsed.source_ip"}, - // }, + // ContextToSend: map[string][]string{ + // "source_ip": {"evt.Parsed.source_ip"}, + // }, SimulationConfig: &SimulationConfig{ Simulation: ptr.Of(false), }, diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go index 79e0cef3df1..e60718975b0 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -26,7 +26,7 @@ func (c *Client) BouncerUpdateBaseMetrics(bouncerName string, bouncerType string SetType(bouncerType). 
Save(c.CTX) if err != nil { - return fmt.Errorf("unable to update base bouncer metrics in database: %s", err) + return fmt.Errorf("unable to update base bouncer metrics in database: %w", err) } return nil } diff --git a/pkg/database/flush.go b/pkg/database/flush.go index 9a66608eaf5..1ac42b5a8f8 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -130,7 +130,7 @@ func (c *Client) flushMetrics(maxAge *time.Duration) { metric.CollectedAtLTE(time.Now().UTC().Add(-*maxAge)), ).Exec(c.CTX) if err != nil { - c.Log.Errorf("while flushing metrics: %s", err) + c.Log.Errorf("while flushing metrics: %w", err) return } diff --git a/pkg/database/metrics.go b/pkg/database/metrics.go index 1f7e4cf217b..107765f13b2 100644 --- a/pkg/database/metrics.go +++ b/pkg/database/metrics.go @@ -8,12 +8,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" ) -// TODO: -// what if they are alrady in the db (should get an error from the unique index) -// CollectMetricsToPush (count limit? including stale?) -// SetPushedMetrics -// RemoveOldMetrics - func (c *Client) CreateMetric(generatedType metric.GeneratedType, generatedBy string, collectedAt time.Time, payload string) (*ent.Metric, error) { metric, err := c.Ent.Metric. Create(). 
From df4cd7eeace4fda5abf724ed44ccafc7fcf5eb7d Mon Sep 17 00:00:00 2001 From: marco Date: Mon, 15 Jul 2024 15:58:35 +0200 Subject: [PATCH 100/119] set pushed metrics timestamp in utc; lint --- cmd/crowdsec/lpmetrics.go | 2 +- cmd/crowdsec/main.go | 12 ++++---- pkg/apiclient/usagemetrics.go | 1 + pkg/apiserver/apic_metrics.go | 25 +++++++++------- pkg/apiserver/apiserver.go | 1 - pkg/apiserver/controllers/v1/errors.go | 2 ++ pkg/apiserver/controllers/v1/errors_test.go | 31 ++++++++++---------- pkg/apiserver/controllers/v1/usagemetrics.go | 22 ++++++++++++-- pkg/apiserver/usage_metrics_test.go | 7 ++--- pkg/csconfig/crowdsec_service_test.go | 1 + pkg/csconfig/database.go | 6 ++-- pkg/cwversion/version.go | 2 +- pkg/database/bouncers.go | 1 + pkg/database/flush.go | 7 ++--- pkg/database/machines.go | 1 - pkg/database/metrics.go | 3 +- 16 files changed, 69 insertions(+), 55 deletions(-) diff --git a/cmd/crowdsec/lpmetrics.go b/cmd/crowdsec/lpmetrics.go index fd0249bd03f..0fd27054071 100644 --- a/cmd/crowdsec/lpmetrics.go +++ b/cmd/crowdsec/lpmetrics.go @@ -141,7 +141,7 @@ func (m *MetricsProvider) Run(ctx context.Context, myTomb *tomb.Tomb) error { met := m.metricsPayload() - ticker := time.NewTicker(1) //Send on start + ticker := time.NewTicker(1) // Send on start for { select { diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 32cab79083f..18416e044e7 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -29,7 +29,7 @@ import ( ) var ( - /*tombs for the parser, buckets and outputs.*/ + // tombs for the parser, buckets and outputs. 
acquisTomb tomb.Tomb parsersTomb tomb.Tomb bucketsTomb tomb.Tomb @@ -41,17 +41,17 @@ var ( flags *Flags - /*the state of acquisition*/ + // the state of acquisition dataSources []acquisition.DataSource - /*the state of the buckets*/ + // the state of the buckets holders []leakybucket.BucketFactory buckets *leakybucket.Buckets inputLineChan chan types.Event inputEventChan chan types.Event outputEventChan chan types.Event // the buckets init returns its own chan that is used for multiplexing - /*settings*/ - lastProcessedItem time.Time /*keep track of last item timestamp in time-machine. it is used to GC buckets when we dump them.*/ + // settings + lastProcessedItem time.Time // keep track of last item timestamp in time-machine. it is used to GC buckets when we dump them. pluginBroker csplugin.PluginBroker ) @@ -308,7 +308,7 @@ func LoadConfig(configFile string, disableAgent bool, disableAPI bool, quiet boo if cConfig.API != nil && cConfig.API.Server != nil { cConfig.API.Server.OnlineClient = nil } - /*if the api is disabled as well, just read file and exit, don't daemonize*/ + // if the api is disabled as well, just read file and exit, don't daemonize if cConfig.DisableAPI { cConfig.Common.Daemonize = false } diff --git a/pkg/apiclient/usagemetrics.go b/pkg/apiclient/usagemetrics.go index c418f4843d6..1d822bb5c1e 100644 --- a/pkg/apiclient/usagemetrics.go +++ b/pkg/apiclient/usagemetrics.go @@ -17,6 +17,7 @@ func (s *UsageMetricsService) Add(ctx context.Context, metrics *models.AllMetric if err != nil { return nil, nil, err } + var response interface{} resp, err := s.client.Do(ctx, req, &response) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index e39d66b772a..54640afc2d0 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -38,7 +38,6 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { } for _, bouncer := range bouncers { - dbMetrics, err := 
a.dbClient.GetBouncerUsageMetricsByName(bouncer.Name) if err != nil { log.Errorf("unable to get bouncer usage metrics: %s", err) @@ -59,11 +58,10 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { rcMetrics.Metrics = make([]*models.DetailedMetrics, 0) - //Might seem weird, but we duplicate the bouncers if we have multiple unsent metrics + // Might seem weird, but we duplicate the bouncers if we have multiple unsent metrics for _, dbMetric := range dbMetrics { - dbPayload := &dbPayload{} - //Append no matter what, if we cannot unmarshal, there's no way we'll be able to fix it automatically + // Append no matter what, if we cannot unmarshal, there's no way we'll be able to fix it automatically metricsIds = append(metricsIds, dbMetric.ID) err := json.Unmarshal([]byte(dbMetric.Payload), dbPayload) @@ -111,6 +109,7 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { Version: item.Version, }) } + lpMetrics.HubItems = hubItems } } else { @@ -120,9 +119,8 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { lpMetrics.Metrics = make([]*models.DetailedMetrics, 0) for _, dbMetric := range dbMetrics { - dbPayload := &dbPayload{} - //Append no matter what, if we cannot unmarshal, there's no way we'll be able to fix it automatically + // Append no matter what, if we cannot unmarshal, there's no way we'll be able to fix it automatically metricsIds = append(metricsIds, dbMetric.ID) err := json.Unmarshal([]byte(dbMetric.Payload), dbPayload) @@ -137,7 +135,7 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { allMetrics.LogProcessors = append(allMetrics.LogProcessors, &lpMetrics) } - //FIXME: all of this should only be done once on startup/reload + // FIXME: all of this should only be done once on startup/reload consoleOptions := strings.Join(csconfig.GetConfig().API.Server.ConsoleConfig.EnabledOptions(), ",") allMetrics.Lapi = &models.LapiMetrics{ ConsoleOptions: models.ConsoleOptions{ @@ -164,7 
+162,7 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { Items: make([]*models.MetricsDetailItem, 0), }) - //Force an actual slice to avoid non existing fields in the json + // Force an actual slice to avoid non existing fields in the json if allMetrics.RemediationComponents == nil { allMetrics.RemediationComponents = make([]*models.RemediationComponentsMetrics, 0) } @@ -337,14 +335,16 @@ func (a *apic) SendUsageMetrics() { for { select { case <-a.metricsTomb.Dying(): - //The normal metrics routine also kills push/pull tombs, does that make sense ? + // The normal metrics routine also kills push/pull tombs, does that make sense ? ticker.Stop() return case <-ticker.C: if firstRun { firstRun = false + ticker.Reset(a.usageMetricsInterval) } + metrics, metricsId, err := a.GetUsageMetrics() if err != nil { log.Errorf("unable to get usage metrics: %s", err) @@ -352,19 +352,22 @@ func (a *apic) SendUsageMetrics() { } _, resp, err := a.apiClient.UsageMetrics.Add(context.Background(), metrics) - if err != nil { log.Errorf("unable to send usage metrics: %s", err) + if resp.Response.StatusCode >= http.StatusBadRequest && resp.Response.StatusCode != http.StatusUnprocessableEntity { - //In case of 422, mark the metrics as sent anyway, the API did not like what we sent, and it's unlikely we'll be able to fix it + // In case of 422, mark the metrics as sent anyway, the API did not like what we sent, + // and it's unlikely we'll be able to fix it continue } } + err = a.MarkUsageMetricsAsSent(metricsId) if err != nil { log.Errorf("unable to mark usage metrics as sent: %s", err) continue } + log.Infof("Sent %d usage metrics", len(metricsId)) } } diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 373a1600ad6..bd0b5d39bf4 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -372,7 +372,6 @@ func (s *APIServer) Run(apiReady chan bool) error { s.apic.SendUsageMetrics() return nil }) - } s.httpServerTomb.Go(func() 
error { diff --git a/pkg/apiserver/controllers/v1/errors.go b/pkg/apiserver/controllers/v1/errors.go index 278d352561d..d661de44b0e 100644 --- a/pkg/apiserver/controllers/v1/errors.go +++ b/pkg/apiserver/controllers/v1/errors.go @@ -46,9 +46,11 @@ func collapseRepeatedPrefix(text string, prefix string) string { count++ text = strings.TrimPrefix(text, prefix) } + if count > 0 { return prefix + text } + return text } diff --git a/pkg/apiserver/controllers/v1/errors_test.go b/pkg/apiserver/controllers/v1/errors_test.go index da962f127c3..89c561f83bd 100644 --- a/pkg/apiserver/controllers/v1/errors_test.go +++ b/pkg/apiserver/controllers/v1/errors_test.go @@ -2,8 +2,8 @@ package v1 import ( "errors" - "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -15,26 +15,25 @@ func TestCollapseRepeatedPrefix(t *testing.T) { want string }{ { - input: "aaabbbcccaaa", + input: "aaabbbcccaaa", prefix: "aaa", - want: "aaabbbcccaaa"}, - { - input: "hellohellohello world", + want: "aaabbbcccaaa", + }, { + input: "hellohellohello world", prefix: "hello", - want: "hello world"}, - { - input: "ababababxyz", + want: "hello world", + }, { + input: "ababababxyz", prefix: "ab", - want: "abxyz", - }, - { - input: "xyzxyzxyzxyzxyz", + want: "abxyz", + }, { + input: "xyzxyzxyzxyzxyz", prefix: "xyz", - want: "xyz"}, - { - input: "123123123456", + want: "xyz", + }, { + input: "123123123456", prefix: "456", - want: "123123123456", + want: "123123123456", }, } diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go index 52db93a4c4a..a33b6e033fd 100644 --- a/pkg/apiserver/controllers/v1/usagemetrics.go +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -2,7 +2,7 @@ package v1 import ( "encoding/json" - "fmt" + "errors" "net/http" "time" @@ -10,10 +10,11 @@ import ( "github.com/go-openapi/strfmt" log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/go-cs-lib/ptr" + 
"github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" "github.com/crowdsecurity/crowdsec/pkg/models" - "github.com/crowdsecurity/go-cs-lib/ptr" ) // updateBaseMetrics updates the base metrics for a machine or bouncer @@ -24,7 +25,7 @@ func (c *Controller) updateBaseMetrics(machineID string, bouncer *ent.Bouncer, b case bouncer != nil: c.DBClient.BouncerUpdateBaseMetrics(bouncer.Name, bouncer.Type, baseMetrics) default: - return fmt.Errorf("no machineID or bouncerName set") + return errors.New("no machineID or bouncerName set") } return nil @@ -41,6 +42,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { if err := gctx.ShouldBindJSON(&input); err != nil { logger.Errorf("Failed to bind json: %s", err) gctx.JSON(http.StatusBadRequest, gin.H{"message": err.Error()}) + return } @@ -52,6 +54,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { } logger.Errorf("Failed to validate usage metrics: %s", cleanErr) gctx.JSON(http.StatusUnprocessableEntity, gin.H{"message": cleanErr.Error()}) + return } @@ -64,6 +67,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { bouncer, _ := getBouncerFromContext(gctx) if bouncer != nil { logger.Tracef("Received usage metris for bouncer: %s", bouncer.Name) + generatedType = metric.GeneratedTypeRC generatedBy = bouncer.Name } @@ -71,6 +75,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { machineID, _ := getMachineIDFromContext(gctx) if machineID != "" { logger.Tracef("Received usage metrics for log processor: %s", machineID) + generatedType = metric.GeneratedTypeLP generatedBy = machineID } @@ -79,12 +84,14 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { // how did we get here? 
logger.Error("No machineID or bouncer in request context after authentication") gctx.JSON(http.StatusInternalServerError, gin.H{"message": "No machineID or bouncer in request context after authentication"}) + return } if machineID != "" && bouncer != nil { logger.Errorf("Payload has both machineID and bouncer") gctx.JSON(http.StatusBadRequest, gin.H{"message": "Payload has both LP and RC data"}) + return } @@ -100,6 +107,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { if machineID != "" { logger.Errorf("Missing log processor data") gctx.JSON(http.StatusBadRequest, gin.H{"message": "Missing log processor data"}) + return } case 1: @@ -111,8 +119,10 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { if err != nil { logger.Errorf("Failed to validate log processor data: %s", err) gctx.JSON(http.StatusUnprocessableEntity, gin.H{"message": err.Error()}) + return } + payload = map[string]any{ "metrics": item0.Metrics, } @@ -123,6 +133,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { logger.Errorf("Payload has more than one log processor") // this is not checked in the swagger schema gctx.JSON(http.StatusBadRequest, gin.H{"message": "Payload has more than one log processor"}) + return } @@ -131,6 +142,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { if bouncer != nil { logger.Errorf("Missing remediation component data") gctx.JSON(http.StatusBadRequest, gin.H{"message": "Missing remediation component data"}) + return } case 1: @@ -140,6 +152,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { if err != nil { logger.Errorf("Failed to validate remediation component data: %s", err) gctx.JSON(http.StatusUnprocessableEntity, gin.H{"message": err.Error()}) + return } @@ -164,6 +177,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { if err != nil { logger.Errorf("Failed to update base metrics: %s", err) c.HandleDBErrors(gctx, err) + return } @@ -178,12 +192,14 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { if err 
!= nil { logger.Errorf("Failed to marshal usage metrics: %s", err) c.HandleDBErrors(gctx, err) + return } if _, err := c.DBClient.CreateMetric(generatedType, generatedBy, collectedAt, string(jsonPayload)); err != nil { logger.Error(err) c.HandleDBErrors(gctx, err) + return } diff --git a/pkg/apiserver/usage_metrics_test.go b/pkg/apiserver/usage_metrics_test.go index aa2b2a532e8..41dd0ccdc2c 100644 --- a/pkg/apiserver/usage_metrics_test.go +++ b/pkg/apiserver/usage_metrics_test.go @@ -6,9 +6,10 @@ import ( "strings" "testing" + "github.com/stretchr/testify/assert" + "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" - "github.com/stretchr/testify/assert" ) func TestLPMetrics(t *testing.T) { @@ -184,7 +185,6 @@ func TestLPMetrics(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - lapi := SetupLAPITest(t) dbClient, err := database.NewClient(context.Background(), lapi.DBConfig) @@ -211,7 +211,6 @@ func TestLPMetrics(t *testing.T) { } }) } - } func TestRCMetrics(t *testing.T) { @@ -356,7 +355,6 @@ func TestRCMetrics(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - lapi := SetupLAPITest(t) dbClient, err := database.NewClient(context.Background(), lapi.DBConfig) @@ -383,5 +381,4 @@ func TestRCMetrics(t *testing.T) { } }) } - } diff --git a/pkg/csconfig/crowdsec_service_test.go b/pkg/csconfig/crowdsec_service_test.go index 8ce084807c6..7570b63011e 100644 --- a/pkg/csconfig/crowdsec_service_test.go +++ b/pkg/csconfig/crowdsec_service_test.go @@ -184,6 +184,7 @@ func TestLoadCrowdsec(t *testing.T) { t.Run(tc.name, func(t *testing.T) { err := tc.input.LoadCrowdsec() cstest.RequireErrorContains(t, err, tc.expectedErr) + if tc.expectedErr != "" { return } diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go index fb1e82ac100..4ca582cf576 100644 --- a/pkg/csconfig/database.go +++ b/pkg/csconfig/database.go @@ -81,9 +81,9 @@ func (c *Config) 
LoadDBConfig(inCli bool) error { case err != nil: log.Warnf("unable to determine if database is on network filesystem: %s", err) log.Warning( - "You are using sqlite without WAL, this can have a performance impact. " + - "If you do not store the database in a network share, set db_config.use_wal to true. " + - "Set explicitly to false to disable this warning.") + "You are using sqlite without WAL, this can have a performance impact. " + + "If you do not store the database in a network share, set db_config.use_wal to true. " + + "Set explicitly to false to disable this warning.") case isNetwork: log.Debugf("database is on network filesystem (%s), setting useWal to false", fsType) c.DbConfig.UseWal = ptr.Of(false) diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go index 53990fb23d6..28d5c2a621c 100644 --- a/pkg/cwversion/version.go +++ b/pkg/cwversion/version.go @@ -10,7 +10,7 @@ import ( ) var ( - Codename string // = "SoumSoum" + Codename string // = "SoumSoum" Libre2 = "WebAssembly" ) diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go index e60718975b0..ff750e63c59 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -28,6 +28,7 @@ func (c *Client) BouncerUpdateBaseMetrics(bouncerName string, bouncerType string if err != nil { return fmt.Errorf("unable to update base bouncer metrics in database: %w", err) } + return nil } diff --git a/pkg/database/flush.go b/pkg/database/flush.go index 1ac42b5a8f8..580235bc8f1 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -99,6 +99,7 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched log.Warning("bouncers auto-delete for login/password auth is not supported (use cert or api)") } } + baJob, err := scheduler.Every(flushInterval).Do(c.FlushAgentsAndBouncers, config.AgentsGC, config.BouncersGC) if err != nil { return nil, fmt.Errorf("while starting FlushAgentsAndBouncers scheduler: %w", err) @@ -130,7 +131,7 @@ func (c *Client) 
flushMetrics(maxAge *time.Duration) { metric.CollectedAtLTE(time.Now().UTC().Add(-*maxAge)), ).Exec(c.CTX) if err != nil { - c.Log.Errorf("while flushing metrics: %w", err) + c.Log.Errorf("while flushing metrics: %s", err) return } @@ -154,7 +155,6 @@ func (c *Client) FlushOrphans() { eventsCount, err = c.Ent.Decision.Delete().Where( decision.Not(decision.HasOwner())).Where(decision.UntilLTE(time.Now().UTC())).Exec(c.CTX) - if err != nil { c.Log.Warningf("error while deleting orphan decisions: %s", err) return @@ -175,7 +175,6 @@ func (c *Client) flushBouncers(authType string, duration *time.Duration) { ).Where( bouncer.AuthTypeEQ(authType), ).Exec(c.CTX) - if err != nil { c.Log.Errorf("while auto-deleting expired bouncers (%s): %s", authType, err) return @@ -196,7 +195,6 @@ func (c *Client) flushAgents(authType string, duration *time.Duration) { machine.Not(machine.HasAlerts()), machine.AuthTypeEQ(authType), ).Exec(c.CTX) - if err != nil { c.Log.Errorf("while auto-deleting expired machines (%s): %s", authType, err) return @@ -290,7 +288,6 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { if maxid > 0 { // This may lead to orphan alerts (at least on MySQL), but the next time the flush job will run, they will be deleted deletedByNbItem, err = c.Ent.Alert.Delete().Where(alert.IDLT(maxid)).Exec(c.CTX) - if err != nil { c.Log.Errorf("FlushAlerts: Could not delete alerts: %s", err) return fmt.Errorf("could not delete alerts: %w", err) diff --git a/pkg/database/machines.go b/pkg/database/machines.go index 35273d28d27..21349b8b687 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -55,7 +55,6 @@ func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics models.B SetLastHeartbeat(heartbeat). SetHubstate(hubState). SetDatasources(datasources). 
- // TODO: update scenarios Save(c.CTX) if err != nil { return fmt.Errorf("unable to update base machine metrics in database: %w", err) diff --git a/pkg/database/metrics.go b/pkg/database/metrics.go index 107765f13b2..8c2c890e490 100644 --- a/pkg/database/metrics.go +++ b/pkg/database/metrics.go @@ -100,8 +100,7 @@ func (c *Client) GetBouncerUsageMetricsByName(bouncerName string) ([]*ent.Metric func (c *Client) MarkUsageMetricsAsSent(ids []int) error { _, err := c.Ent.Metric.Update(). Where(metric.IDIn(ids...)). - // XXX: no utc? - SetPushedAt(time.Now()). + SetPushedAt(time.Now().UTC()). Save(c.CTX) if err != nil { c.Log.Warningf("MarkUsageMetricsAsSent: %s", err) From 9fe3b5f66413751a94ae22bac778f05642f50bcb Mon Sep 17 00:00:00 2001 From: marco Date: Mon, 15 Jul 2024 16:27:03 +0200 Subject: [PATCH 101/119] remove test bouncer after use --- test/bats/11_bouncers_tls.bats | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/bats/11_bouncers_tls.bats b/test/bats/11_bouncers_tls.bats index ca916f394aa..554308ae962 100644 --- a/test/bats/11_bouncers_tls.bats +++ b/test/bats/11_bouncers_tls.bats @@ -187,6 +187,8 @@ teardown() { https://localhost:8080/v1/usage-metrics -X POST --data "$payload" assert_stderr --partial 'error: 401' assert_json '{code:401, message: "cookie token is empty"}' + + rune cscli bouncers delete localhost@127.0.0.1 } @test "simulate a bouncer request with an invalid cert" { From d1ffcac9908748a4ce11c8cfff436fa1a41aeb5a Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Thu, 18 Jul 2024 20:27:03 +0200 Subject: [PATCH 102/119] properly init/reinit lpmetrics tomb --- cmd/crowdsec/serve.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index b28fc019460..f1a658e9512 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -60,6 +60,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { apiTomb = tomb.Tomb{} crowdsecTomb = tomb.Tomb{} pluginTomb = tomb.Tomb{} + lpMetricsTomb 
= tomb.Tomb{} cConfig, err := LoadConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI, false) if err != nil { @@ -186,6 +187,8 @@ func ShutdownCrowdsecRoutines() error { reterr = err } + log.Debugf("metrics are done") + // He's dead, Jim. crowdsecTomb.Kill(nil) @@ -329,6 +332,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { apiTomb = tomb.Tomb{} crowdsecTomb = tomb.Tomb{} pluginTomb = tomb.Tomb{} + lpMetricsTomb = tomb.Tomb{} ctx := context.TODO() From e6bf753240a1a64bb7010d7b8f3ee644b052d255 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 22 Jul 2024 12:59:24 +0200 Subject: [PATCH 103/119] CI: update test dependencies (#3146) --- docker/test/Pipfile.lock | 213 +++++++++++++++++++-------------------- 1 file changed, 106 insertions(+), 107 deletions(-) diff --git a/docker/test/Pipfile.lock b/docker/test/Pipfile.lock index 75437876b72..2cb587b6b88 100644 --- a/docker/test/Pipfile.lock +++ b/docker/test/Pipfile.lock @@ -18,11 +18,11 @@ "default": { "certifi": { "hashes": [ - "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1", - "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474" + "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b", + "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90" ], "markers": "python_version >= '3.6'", - "version": "==2023.11.17" + "version": "==2024.7.4" }, "cffi": { "hashes": [ @@ -180,65 +180,60 @@ }, "cryptography": { "hashes": [ - "sha256:087887e55e0b9c8724cf05361357875adb5c20dec27e5816b653492980d20380", - "sha256:09a77e5b2e8ca732a19a90c5bca2d124621a1edb5438c5daa2d2738bfeb02589", - "sha256:130c0f77022b2b9c99d8cebcdd834d81705f61c68e91ddd614ce74c657f8b3ea", - "sha256:141e2aa5ba100d3788c0ad7919b288f89d1fe015878b9659b307c9ef867d3a65", - "sha256:28cb2c41f131a5758d6ba6a0504150d644054fd9f3203a1e8e8d7ac3aea7f73a", - 
"sha256:2f9f14185962e6a04ab32d1abe34eae8a9001569ee4edb64d2304bf0d65c53f3", - "sha256:320948ab49883557a256eab46149df79435a22d2fefd6a66fe6946f1b9d9d008", - "sha256:36d4b7c4be6411f58f60d9ce555a73df8406d484ba12a63549c88bd64f7967f1", - "sha256:3b15c678f27d66d247132cbf13df2f75255627bcc9b6a570f7d2fd08e8c081d2", - "sha256:3dbd37e14ce795b4af61b89b037d4bc157f2cb23e676fa16932185a04dfbf635", - "sha256:4383b47f45b14459cab66048d384614019965ba6c1a1a141f11b5a551cace1b2", - "sha256:44c95c0e96b3cb628e8452ec060413a49002a247b2b9938989e23a2c8291fc90", - "sha256:4b063d3413f853e056161eb0c7724822a9740ad3caa24b8424d776cebf98e7ee", - "sha256:52ed9ebf8ac602385126c9a2fe951db36f2cb0c2538d22971487f89d0de4065a", - "sha256:55d1580e2d7e17f45d19d3b12098e352f3a37fe86d380bf45846ef257054b242", - "sha256:5ef9bc3d046ce83c4bbf4c25e1e0547b9c441c01d30922d812e887dc5f125c12", - "sha256:5fa82a26f92871eca593b53359c12ad7949772462f887c35edaf36f87953c0e2", - "sha256:61321672b3ac7aade25c40449ccedbc6db72c7f5f0fdf34def5e2f8b51ca530d", - "sha256:701171f825dcab90969596ce2af253143b93b08f1a716d4b2a9d2db5084ef7be", - "sha256:841ec8af7a8491ac76ec5a9522226e287187a3107e12b7d686ad354bb78facee", - "sha256:8a06641fb07d4e8f6c7dda4fc3f8871d327803ab6542e33831c7ccfdcb4d0ad6", - "sha256:8e88bb9eafbf6a4014d55fb222e7360eef53e613215085e65a13290577394529", - "sha256:a00aee5d1b6c20620161984f8ab2ab69134466c51f58c052c11b076715e72929", - "sha256:a047682d324ba56e61b7ea7c7299d51e61fd3bca7dad2ccc39b72bd0118d60a1", - "sha256:a7ef8dd0bf2e1d0a27042b231a3baac6883cdd5557036f5e8df7139255feaac6", - "sha256:ad28cff53f60d99a928dfcf1e861e0b2ceb2bc1f08a074fdd601b314e1cc9e0a", - "sha256:b9097a208875fc7bbeb1286d0125d90bdfed961f61f214d3f5be62cd4ed8a446", - "sha256:b97fe7d7991c25e6a31e5d5e795986b18fbbb3107b873d5f3ae6dc9a103278e9", - "sha256:e0ec52ba3c7f1b7d813cd52649a5b3ef1fc0d433219dc8c93827c57eab6cf888", - "sha256:ea2c3ffb662fec8bbbfce5602e2c159ff097a4631d96235fcf0fb00e59e3ece4", - "sha256:fa3dec4ba8fb6e662770b74f62f1a0c7d4e37e25b58b2bf2c1be4c95372b4a33", 
- "sha256:fbeb725c9dc799a574518109336acccaf1303c30d45c075c665c0793c2f79a7f" + "sha256:0663585d02f76929792470451a5ba64424acc3cd5227b03921dab0e2f27b1709", + "sha256:08a24a7070b2b6804c1940ff0f910ff728932a9d0e80e7814234269f9d46d069", + "sha256:232ce02943a579095a339ac4b390fbbe97f5b5d5d107f8a08260ea2768be8cc2", + "sha256:2905ccf93a8a2a416f3ec01b1a7911c3fe4073ef35640e7ee5296754e30b762b", + "sha256:299d3da8e00b7e2b54bb02ef58d73cd5f55fb31f33ebbf33bd00d9aa6807df7e", + "sha256:2c6d112bf61c5ef44042c253e4859b3cbbb50df2f78fa8fae6747a7814484a70", + "sha256:31e44a986ceccec3d0498e16f3d27b2ee5fdf69ce2ab89b52eaad1d2f33d8778", + "sha256:3d9a1eca329405219b605fac09ecfc09ac09e595d6def650a437523fcd08dd22", + "sha256:3dcdedae5c7710b9f97ac6bba7e1052b95c7083c9d0e9df96e02a1932e777895", + "sha256:47ca71115e545954e6c1d207dd13461ab81f4eccfcb1345eac874828b5e3eaaf", + "sha256:4a997df8c1c2aae1e1e5ac49c2e4f610ad037fc5a3aadc7b64e39dea42249431", + "sha256:51956cf8730665e2bdf8ddb8da0056f699c1a5715648c1b0144670c1ba00b48f", + "sha256:5bcb8a5620008a8034d39bce21dc3e23735dfdb6a33a06974739bfa04f853947", + "sha256:64c3f16e2a4fc51c0d06af28441881f98c5d91009b8caaff40cf3548089e9c74", + "sha256:6e2b11c55d260d03a8cf29ac9b5e0608d35f08077d8c087be96287f43af3ccdc", + "sha256:7b3f5fe74a5ca32d4d0f302ffe6680fcc5c28f8ef0dc0ae8f40c0f3a1b4fca66", + "sha256:844b6d608374e7d08f4f6e6f9f7b951f9256db41421917dfb2d003dde4cd6b66", + "sha256:9a8d6802e0825767476f62aafed40532bd435e8a5f7d23bd8b4f5fd04cc80ecf", + "sha256:aae4d918f6b180a8ab8bf6511a419473d107df4dbb4225c7b48c5c9602c38c7f", + "sha256:ac1955ce000cb29ab40def14fd1bbfa7af2017cca696ee696925615cafd0dce5", + "sha256:b88075ada2d51aa9f18283532c9f60e72170041bba88d7f37e49cbb10275299e", + "sha256:cb013933d4c127349b3948aa8aaf2f12c0353ad0eccd715ca789c8a0f671646f", + "sha256:cc70b4b581f28d0a254d006f26949245e3657d40d8857066c2ae22a61222ef55", + "sha256:e9c5266c432a1e23738d178e51c2c7a5e2ddf790f248be939448c0ba2021f9d1", + 
"sha256:ea9e57f8ea880eeea38ab5abf9fbe39f923544d7884228ec67d666abd60f5a47", + "sha256:ee0c405832ade84d4de74b9029bedb7b31200600fa524d218fc29bfa371e97f5", + "sha256:fdcb265de28585de5b859ae13e3846a8e805268a823a12a4da2597f1f5afc9f0" ], "markers": "python_version >= '3.7'", - "version": "==42.0.2" + "version": "==43.0.0" }, "docker": { "hashes": [ - "sha256:12ba681f2777a0ad28ffbcc846a69c31b4dfd9752b47eb425a274ee269c5e14b", - "sha256:323736fb92cd9418fc5e7133bc953e11a9da04f4483f828b527db553f1e7e5a3" + "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c", + "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0" ], "markers": "python_version >= '3.8'", - "version": "==7.0.0" + "version": "==7.1.0" }, "execnet": { "hashes": [ - "sha256:88256416ae766bc9e8895c76a87928c0012183da3cc4fc18016e6f050e025f41", - "sha256:cc59bc4423742fd71ad227122eb0dd44db51efb3dc4095b45ac9a08c770096af" + "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc", + "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3" ], - "markers": "python_version >= '3.7'", - "version": "==2.0.2" + "markers": "python_version >= '3.8'", + "version": "==2.1.1" }, "idna": { "hashes": [ - "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca", - "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f" + "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc", + "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0" ], "markers": "python_version >= '3.5'", - "version": "==3.6" + "version": "==3.7" }, "iniconfig": { "hashes": [ @@ -250,56 +245,58 @@ }, "packaging": { "hashes": [ - "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5", - "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7" + "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002", + 
"sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124" ], - "markers": "python_version >= '3.7'", - "version": "==23.2" + "markers": "python_version >= '3.8'", + "version": "==24.1" }, "pluggy": { "hashes": [ - "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981", - "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be" + "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", + "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669" ], "markers": "python_version >= '3.8'", - "version": "==1.4.0" + "version": "==1.5.0" }, "psutil": { "hashes": [ - "sha256:02615ed8c5ea222323408ceba16c60e99c3f91639b07da6373fb7e6539abc56d", - "sha256:05806de88103b25903dff19bb6692bd2e714ccf9e668d050d144012055cbca73", - "sha256:26bd09967ae00920df88e0352a91cff1a78f8d69b3ecabbfe733610c0af486c8", - "sha256:27cc40c3493bb10de1be4b3f07cae4c010ce715290a5be22b98493509c6299e2", - "sha256:36f435891adb138ed3c9e58c6af3e2e6ca9ac2f365efe1f9cfef2794e6c93b4e", - "sha256:50187900d73c1381ba1454cf40308c2bf6f34268518b3f36a9b663ca87e65e36", - "sha256:611052c4bc70432ec770d5d54f64206aa7203a101ec273a0cd82418c86503bb7", - "sha256:6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c", - "sha256:7d79560ad97af658a0f6adfef8b834b53f64746d45b403f225b85c5c2c140eee", - "sha256:8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421", - "sha256:8db4c1b57507eef143a15a6884ca10f7c73876cdf5d51e713151c1236a0e68cf", - "sha256:aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81", - "sha256:bc56c2a1b0d15aa3eaa5a60c9f3f8e3e565303b465dbf57a1b730e7a2b9844e0", - "sha256:bd1184ceb3f87651a67b2708d4c3338e9b10c5df903f2e3776b62303b26cb631", - "sha256:d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4", - "sha256:d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8" + "sha256:02b69001f44cc73c1c5279d02b30a817e339ceb258ad75997325e0e6169d8b35", + 
"sha256:1287c2b95f1c0a364d23bc6f2ea2365a8d4d9b726a3be7294296ff7ba97c17f0", + "sha256:1e7c870afcb7d91fdea2b37c24aeb08f98b6d67257a5cb0a8bc3ac68d0f1a68c", + "sha256:21f1fb635deccd510f69f485b87433460a603919b45e2a324ad65b0cc74f8fb1", + "sha256:33ea5e1c975250a720b3a6609c490db40dae5d83a4eb315170c4fe0d8b1f34b3", + "sha256:34859b8d8f423b86e4385ff3665d3f4d94be3cdf48221fbe476e883514fdb71c", + "sha256:5fd9a97c8e94059b0ef54a7d4baf13b405011176c3b6ff257c247cae0d560ecd", + "sha256:6ec7588fb3ddaec7344a825afe298db83fe01bfaaab39155fa84cf1c0d6b13c3", + "sha256:6ed2440ada7ef7d0d608f20ad89a04ec47d2d3ab7190896cd62ca5fc4fe08bf0", + "sha256:8faae4f310b6d969fa26ca0545338b21f73c6b15db7c4a8d934a5482faa818f2", + "sha256:a021da3e881cd935e64a3d0a20983bda0bb4cf80e4f74fa9bfcb1bc5785360c6", + "sha256:a495580d6bae27291324fe60cea0b5a7c23fa36a7cd35035a16d93bdcf076b9d", + "sha256:a9a3dbfb4de4f18174528d87cc352d1f788b7496991cca33c6996f40c9e3c92c", + "sha256:c588a7e9b1173b6e866756dde596fd4cad94f9399daf99ad8c3258b3cb2b47a0", + "sha256:e2e8d0054fc88153ca0544f5c4d554d42e33df2e009c4ff42284ac9ebdef4132", + "sha256:fc8c9510cde0146432bbdb433322861ee8c3efbf8589865c8bf8d21cb30c4d14", + "sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", - "version": "==5.9.8" + "version": "==6.0.0" }, "pycparser": { "hashes": [ - "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9", - "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206" + "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", + "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc" ], - "version": "==2.21" + "markers": "python_version >= '3.8'", + "version": "==2.22" }, "pytest": { "hashes": [ - "sha256:249b1b0864530ba251b7438274c4d251c58d868edaaec8762893ad4a0d71c36c", - "sha256:50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6" + 
"sha256:7e8e5c5abd6e93cb1cc151f23e57adc31fcf8cfd2a3ff2da63e23f732de35db6", + "sha256:e9600ccf4f563976e2c99fa02c7624ab938296551f280835ee6516df8bc4ae8c" ], "markers": "python_version >= '3.8'", - "version": "==8.0.0" + "version": "==8.3.1" }, "pytest-cs": { "git": "https://github.com/crowdsecurity/pytest-cs.git", @@ -327,6 +324,7 @@ "sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24" ], "index": "pypi", + "markers": "python_version >= '3.7'", "version": "==3.5.0" }, "python-dotenv": { @@ -396,11 +394,11 @@ }, "requests": { "hashes": [ - "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f", - "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1" + "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", + "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6" ], - "markers": "python_version >= '3.7'", - "version": "==2.31.0" + "markers": "python_version >= '3.8'", + "version": "==2.32.3" }, "trustme": { "hashes": [ @@ -412,11 +410,11 @@ }, "urllib3": { "hashes": [ - "sha256:051d961ad0c62a94e50ecf1af379c3aba230c66c710493493560c0c223c49f20", - "sha256:ce3711610ddce217e6d113a2732fafad960a03fd0318c91faa79481e35c11224" + "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472", + "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168" ], "markers": "python_version >= '3.8'", - "version": "==2.2.0" + "version": "==2.2.2" } }, "develop": { @@ -482,15 +480,16 @@ "sha256:e3ac6018ef05126d442af680aad863006ec19d02290561ac88b8b1c0b0cfc726" ], "index": "pypi", + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.13.13" }, "ipython": { "hashes": [ - "sha256:1050a3ab8473488d7eee163796b02e511d0735cf43a04ba2a8348bd0f2eaf8a5", - "sha256:48fbc236fbe0e138b88773fa0437751f14c3645fb483f1d4c5dee58b37e5ce73" + "sha256:1cec0fbba8404af13facebe83d04436a7434c7400e59f47acf467c64abd0956c", + 
"sha256:e6b347c27bdf9c32ee9d31ae85defc525755a1869f14057e900675b9e8d6e6ff" ], "markers": "python_version >= '3.11'", - "version": "==8.21.0" + "version": "==8.26.0" }, "jedi": { "hashes": [ @@ -502,35 +501,35 @@ }, "matplotlib-inline": { "hashes": [ - "sha256:f1f41aab5328aa5aaea9b16d083b128102f8712542f819fe7e6a420ff581b311", - "sha256:f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304" + "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", + "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca" ], - "markers": "python_version >= '3.5'", - "version": "==0.1.6" + "markers": "python_version >= '3.8'", + "version": "==0.1.7" }, "parso": { "hashes": [ - "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0", - "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75" + "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", + "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d" ], "markers": "python_version >= '3.6'", - "version": "==0.8.3" + "version": "==0.8.4" }, "pexpect": { "hashes": [ "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f" ], - "markers": "sys_platform != 'win32'", + "markers": "sys_platform != 'win32' and sys_platform != 'emscripten'", "version": "==4.9.0" }, "prompt-toolkit": { "hashes": [ - "sha256:3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d", - "sha256:a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6" + "sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10", + "sha256:1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360" ], "markers": "python_full_version >= '3.7.0'", - "version": "==3.0.43" + "version": "==3.0.47" }, "ptyprocess": { "hashes": [ @@ -541,18 +540,18 @@ }, "pure-eval": { "hashes": [ - 
"sha256:01eaab343580944bc56080ebe0a674b39ec44a945e6d09ba7db3cb8cec289350", - "sha256:2b45320af6dfaa1750f543d714b6d1c520a1688dec6fd24d339063ce0aaa9ac3" + "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", + "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42" ], - "version": "==0.2.2" + "version": "==0.2.3" }, "pygments": { "hashes": [ - "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c", - "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367" + "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199", + "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a" ], - "markers": "python_version >= '3.7'", - "version": "==2.17.2" + "markers": "python_version >= '3.8'", + "version": "==2.18.0" }, "six": { "hashes": [ @@ -571,11 +570,11 @@ }, "traitlets": { "hashes": [ - "sha256:2e5a030e6eff91737c643231bfcf04a65b0132078dad75e4936700b213652e74", - "sha256:8585105b371a04b8316a43d5ce29c098575c2e477850b62b848b964f1444527e" + "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", + "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f" ], "markers": "python_version >= '3.8'", - "version": "==5.14.1" + "version": "==5.14.3" }, "wcwidth": { "hashes": [ From 51d6f84ec7f601ba0fcd7aff99c213d7c999f634 Mon Sep 17 00:00:00 2001 From: marco Date: Mon, 22 Jul 2024 14:29:10 +0200 Subject: [PATCH 104/119] long label for origin "cscli", "crowdsec" --- cmd/crowdsec-cli/climetrics/statbouncer.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 1a803cefbd2..f16eb93fb10 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -275,8 +275,13 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor metrics := s.aggregated[bouncerName][origin] // 
some users don't know what capi is - if origin == "CAPI" { + switch origin { + case "CAPI": origin += " (community blocklist)" + case "cscli": + origin += " (manual decisions)" + case "crowdsec": + origin += " (security engine)" } row := table.Row{origin} From a585ba010c439628f0a0b170e83b29880f33f15e Mon Sep 17 00:00:00 2001 From: marco Date: Mon, 22 Jul 2024 23:49:51 +0200 Subject: [PATCH 105/119] dont' sum active_decisions --- cmd/crowdsec-cli/climetrics/statbouncer.go | 41 +++++++--- test/bats/08_metrics_bouncer.bats | 91 +++++++++++++++++++++- 2 files changed, 117 insertions(+), 15 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index f16eb93fb10..8d60a99ba79 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -15,12 +15,14 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" "github.com/crowdsecurity/crowdsec/pkg/models" ) // un-aggregated data, de-normalized. type bouncerMetricItem struct { + collectedAt time.Time bouncerName string ipType string origin string @@ -72,11 +74,16 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { metrics, err := db.Ent.Metric.Query(). Where( metric.GeneratedTypeEQ(metric.GeneratedTypeRC), - ).All(ctx) + ). + // we will process metrics ordered by timestamp, so that active_decisions + // can override previous values + Order(ent.Asc(metric.FieldCollectedAt)). 
+ All(ctx) if err != nil { return fmt.Errorf("unable to fetch metrics: %w", err) } + // keep the oldest timestamp for each bouncer s.oldestTS = make(map[string]*time.Time) // don't spam the user with the same warnings @@ -135,6 +142,7 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { value := *item.Value rawMetric := bouncerMetricItem{ + collectedAt: collectedAt, bouncerName: bouncerName, ipType: labels["ip_type"], origin: labels["origin"], @@ -180,21 +188,32 @@ func (s *statBouncer) aggregate() { s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] = 0 } - s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] += int64(raw.value) - - if _, ok := s.aggregatedAllOrigin[raw.bouncerName]; !ok { - s.aggregatedAllOrigin[raw.bouncerName] = make(map[string]map[string]int64) + if raw.name == "active_decisions" { + s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] = int64(raw.value) + } else { + s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] += int64(raw.value) } + } - if _, ok := s.aggregatedAllOrigin[raw.bouncerName][raw.name]; !ok { - s.aggregatedAllOrigin[raw.bouncerName][raw.name] = make(map[string]int64) + for bouncerName := range s.aggregated { + if _, ok := s.aggregatedAllOrigin[bouncerName]; !ok { + s.aggregatedAllOrigin[bouncerName] = make(map[string]map[string]int64) } + for origin := range s.aggregated[bouncerName] { + for name := range s.aggregated[bouncerName][origin] { + if _, ok := s.aggregatedAllOrigin[bouncerName][name]; !ok { + s.aggregatedAllOrigin[bouncerName][name] = make(map[string]int64) + } + for unit := range s.aggregated[bouncerName][origin][name] { + if _, ok := s.aggregatedAllOrigin[bouncerName][name][unit]; !ok { + s.aggregatedAllOrigin[bouncerName][name][unit] = 0 + } - if _, ok := s.aggregatedAllOrigin[raw.bouncerName][raw.name][raw.unit]; !ok { - s.aggregatedAllOrigin[raw.bouncerName][raw.name][raw.unit] = 0 + value := s.aggregated[bouncerName][origin][name][unit] + 
s.aggregatedAllOrigin[bouncerName][name][unit] += value + } + } } - - s.aggregatedAllOrigin[raw.bouncerName][raw.name][raw.unit] += int64(raw.value) } } diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 7390c204c57..2762aed48d4 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -136,7 +136,7 @@ teardown() { { "meta": {"utc_now_timestamp": 1707399916, "window_size_seconds":600}, "items":[ - {"name": "active_decisions", "unit": "ip", "value": 51936, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_voipbl"}}, + {"name": "active_decisions", "unit": "ip", "value": 500, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_voipbl"}}, {"name": "active_decisions", "unit": "ip", "value": 1, "labels": {"ip_type": "ipv6", "origin": "cscli"}}, {"name": "dropped", "unit": "byte", "value": 3800, "labels": {"ip_type": "ipv4", "origin": "CAPI"}}, {"name": "dropped", "unit": "byte", "value": 0, "labels": {"ip_type": "ipv4", "origin": "cscli"}}, @@ -190,7 +190,7 @@ teardown() { }, "lists:firehol_voipbl": { "active_decisions": { - "ip": 51936 + "ip": 500 }, "dropped": { "byte": 3847, @@ -218,9 +218,92 @@ teardown() { | cscli (manual decisions) | 1 | 380 | 10 | - | - | | lists:anotherlist | - | 0 | 0 | - | - | | lists:firehol_cruzit_web_attacks | - | 1.03k | 23 | - | - | - | lists:firehol_voipbl | 51.94k | 3.85k | 58 | - | - | + | lists:firehol_voipbl | 500 | 3.85k | 58 | - | - | +----------------------------------+------------------+---------+---------+---------+-------+ - | Total | 51.94k | 9.06k | 191 | 2 | 5 | + | Total | 501 | 9.06k | 191 | 2 | 5 | + +----------------------------------+------------------+---------+---------+---------+-------+ + EOT + + # active_decisions is not a counter: new values override the old ones + + payload=$(yq -o j ' + .remediation_components[0].metrics = [ + { + "meta": {"utc_now_timestamp": 1707450000, "window_size_seconds":600}, + "items":[ + {"name": 
"active_decisions", "unit": "ip", "value": 250, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_voipbl"}}, + {"name": "active_decisions", "unit": "ip", "value": 10, "labels": {"ip_type": "ipv6", "origin": "cscli"}} + ] + } + ] | + .remediation_components[0].type = "crowdsec-firewall-bouncer" + ' <<<"$payload") + + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + rune -0 cscli metrics show bouncers -o json + assert_json '{ + "bouncers": { + "testbouncer": { + "": { + "foo": { + "dogyear": 2, + "pound": 5 + } + }, + "CAPI": { + "dropped": { + "byte": 3800, + "packet": 100 + } + }, + "cscli": { + "active_decisions": { + "ip": 10 + }, + "dropped": { + "byte": 380, + "packet": 10 + } + }, + "lists:firehol_cruzit_web_attacks": { + "dropped": { + "byte": 1034, + "packet": 23 + } + }, + "lists:firehol_voipbl": { + "active_decisions": { + "ip": 250 + }, + "dropped": { + "byte": 3847, + "packet": 58 + }, + }, + "lists:anotherlist": { + "dropped": { + "byte": 0, + "packet": 0 + } + } + } + } + }' + + rune -0 cscli metrics show bouncers + assert_output - <<-EOT + Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC: + +----------------------------------+------------------+-------------------+-----------------+ + | Origin | active_decisions | dropped | foo | + | | IPs | bytes | packets | dogyear | pound | + +----------------------------------+------------------+---------+---------+---------+-------+ + | CAPI (community blocklist) | - | 3.80k | 100 | - | - | + | cscli (manual decisions) | 10 | 380 | 10 | - | - | + | lists:anotherlist | - | 0 | 0 | - | - | + | lists:firehol_cruzit_web_attacks | - | 1.03k | 23 | - | - | + | lists:firehol_voipbl | 250 | 3.85k | 58 | - | - | + +----------------------------------+------------------+---------+---------+---------+-------+ + | Total | 260 | 9.06k | 191 | 2 | 5 | +----------------------------------+------------------+---------+---------+---------+-------+ EOT From 
3af62dcd9b4cdbd6c8fa961e84afd749fca64b8b Mon Sep 17 00:00:00 2001 From: marco Date: Tue, 23 Jul 2024 10:15:23 +0200 Subject: [PATCH 106/119] handle _gauge suffix on metrics names --- cmd/crowdsec-cli/climetrics/statbouncer.go | 41 +++++++++----- test/bats/08_metrics_bouncer.bats | 63 +++++++++++++++++++++- 2 files changed, 90 insertions(+), 14 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 8d60a99ba79..bbdaf0a4623 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io" + "strings" "time" "github.com/jedib0t/go-pretty/v6/table" @@ -161,6 +162,30 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { return nil } +// return true if the metric is a gauge and should not be aggregated +func (statBouncer) isGauge(name string) bool { + return name == "active_decisions" || strings.HasSuffix(name, "_gauge") +} + +// formatMetricName returns the metric name to display in the table header +func (statBouncer) formatMetricName(name string) string { + return strings.TrimSuffix(name, "_gauge") +} + +// formatMetricOrigin returns the origin to display in the table rows +// (for example, some users don't know what capi is) +func (statBouncer) formatMetricOrigin(origin string) string { + switch origin { + case "CAPI": + origin += " (community blocklist)" + case "cscli": + origin += " (manual decisions)" + case "crowdsec": + origin += " (security engine)" + } + return origin +} + func (s *statBouncer) aggregate() { // [bouncer][origin][name][unit]value if s.aggregated == nil { @@ -188,7 +213,7 @@ func (s *statBouncer) aggregate() { s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] = 0 } - if raw.name == "active_decisions" { + if s.isGauge(raw.name) { s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] = int64(raw.value) } else { 
s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] += int64(raw.value) @@ -257,7 +282,7 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor for _, unit := range maptools.SortedKeys(columns[name]) { colNum += 1 - header1 = append(header1, name) + header1 = append(header1, s.formatMetricName(name)) // we don't add "s" to random words if knownPlurals[unit] != "" { @@ -293,17 +318,7 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor metrics := s.aggregated[bouncerName][origin] - // some users don't know what capi is - switch origin { - case "CAPI": - origin += " (community blocklist)" - case "cscli": - origin += " (manual decisions)" - case "crowdsec": - origin += " (security engine)" - } - - row := table.Row{origin} + row := table.Row{s.formatMetricOrigin(origin)} for _, name := range maptools.SortedKeys(columns) { for _, unit := range maptools.SortedKeys(columns[name]) { diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 2762aed48d4..fae0b6b060c 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -224,7 +224,7 @@ teardown() { +----------------------------------+------------------+---------+---------+---------+-------+ EOT - # active_decisions is not a counter: new values override the old ones + # active_decisions is actually a gauge: values should not be aggregated, keep only the latest one payload=$(yq -o j ' .remediation_components[0].metrics = [ @@ -306,6 +306,67 @@ teardown() { | Total | 260 | 9.06k | 191 | 2 | 5 | +----------------------------------+------------------+---------+---------+---------+-------+ EOT +} + +@test "rc usage metrics (unknown metrics)" { + # new metrics are introduced in a new bouncer version, unknown by this version of cscli: some are gauges, some are not + + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + + payload=$(yq -o j <<-EOT + remediation_components: + - version: 
"v1.0" + utc_startup_timestamp: 1707369316 + log_processors: [] + EOT + ) + + payload=$(yq -o j ' + .remediation_components[0].metrics = [ + { + "meta": {"utc_now_timestamp": 1707450000, "window_size_seconds":600}, + "items":[ + {"name": "ima_gauge", "unit": "second", "value": 20, "labels": {"origin": "cscli"}}, + {"name": "notagauge", "unit": "inch", "value": 10, "labels": {"origin": "cscli"}} + ] + } + ] | + .remediation_components[0].type = "crowdsec-firewall-bouncer" + ' <<<"$payload") + + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + + payload=$(yq -o j ' + .remediation_components[0].metrics = [ + { + "meta": {"utc_now_timestamp": 1707460000, "window_size_seconds":600}, + "items":[ + {"name": "ima_gauge", "unit": "second", "value": 30, "labels": {"origin": "cscli"}}, + {"name": "notagauge", "unit": "inch", "value": 15, "labels": {"origin": "cscli"}} + ] + } + ] | + .remediation_components[0].type = "crowdsec-firewall-bouncer" + ' <<<"$payload") + + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + + rune -0 cscli metrics show bouncers -o json + assert_json '{bouncers: {testbouncer: {cscli: {ima_gauge: {second: 30}, notagauge: {inch: 25}}}}}' + + rune -0 cscli metrics show bouncers + assert_output - <<-EOT + Bouncer Metrics (testbouncer) since 2024-02-09 03:40:00 +0000 UTC: + +--------------------------+--------+-----------+ + | Origin | ima | notagauge | + | | second | inch | + +--------------------------+--------+-----------+ + | cscli (manual decisions) | 30 | 25 | + +--------------------------+--------+-----------+ + | Total | 30 | 25 | + +--------------------------+--------+-----------+ + EOT # TODO: multiple item lists From bb8171a6a76cce5b8ffa64c1234a1db32e76e3dd Mon Sep 17 00:00:00 2001 From: marco Date: Tue, 23 Jul 2024 12:23:18 +0200 Subject: [PATCH 107/119] explicitly de-duplicate raw metrics --- cmd/crowdsec-cli/climetrics/statbouncer.go | 54 +++++++++++++------- 
pkg/apiserver/controllers/v1/usagemetrics.go | 12 ++--- pkg/database/ent/metric.go | 16 +++--- pkg/database/ent/metric/metric.go | 12 ++--- pkg/database/ent/metric/where.go | 54 ++++++++++---------- pkg/database/ent/metric_create.go | 16 +++--- pkg/database/ent/migrate/schema.go | 9 +--- pkg/database/ent/mutation.go | 52 +++++++++---------- pkg/database/ent/schema/metric.go | 13 +---- pkg/database/flush.go | 2 +- pkg/database/metrics.go | 52 +++---------------- test/bats/08_metrics_bouncer.bats | 21 +++----- 12 files changed, 131 insertions(+), 182 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index bbdaf0a4623..d1cf9dd5bdb 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io" + "sort" "strings" "time" @@ -16,7 +17,6 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/database" - "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -34,7 +34,7 @@ type bouncerMetricItem struct { type statBouncer struct { // oldest collection timestamp for each bouncer - oldestTS map[string]*time.Time + oldestTS map[string]time.Time // we keep de-normalized metrics so we can iterate // over them multiple times and keep the aggregation code simple rawMetrics []bouncerMetricItem @@ -78,26 +78,23 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { ). // we will process metrics ordered by timestamp, so that active_decisions // can override previous values - Order(ent.Asc(metric.FieldCollectedAt)). 
All(ctx) if err != nil { return fmt.Errorf("unable to fetch metrics: %w", err) } // keep the oldest timestamp for each bouncer - s.oldestTS = make(map[string]*time.Time) + s.oldestTS = make(map[string]time.Time, 0) // don't spam the user with the same warnings warningsLogged := make(map[string]bool) + // store raw metrics, de-duplicated in case some were sent multiple times + uniqueRaw := make(map[bouncerMetricItem]struct{}) + for _, met := range metrics { bouncerName := met.GeneratedBy - collectedAt := met.CollectedAt - if s.oldestTS[bouncerName] == nil || collectedAt.Before(*s.oldestTS[bouncerName]) { - s.oldestTS[bouncerName] = &collectedAt - } - type bouncerMetrics struct { Metrics []models.DetailedMetrics `json:"metrics"` } @@ -114,6 +111,18 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { for _, item := range m.Items { labels := item.Labels + var collectedAt time.Time + if m.Meta.UtcNowTimestamp != nil { + // if one of these timestamps is nil, we'll just keep the zero value (!!) + // and no timestamp will be displayed in the table title + // maybe a warning is in order, and skip like we do with name etc ? + collectedAt = time.Unix(*m.Meta.UtcNowTimestamp, 0).UTC() + } + + if s.oldestTS[bouncerName].IsZero() || collectedAt.Before(s.oldestTS[bouncerName]) { + s.oldestTS[bouncerName] = collectedAt + } + // these are mandatory but we got pointers, so... 
valid := true @@ -138,25 +147,34 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { continue } - name := *item.Name - unit := *item.Unit - value := *item.Value - rawMetric := bouncerMetricItem{ collectedAt: collectedAt, bouncerName: bouncerName, ipType: labels["ip_type"], origin: labels["origin"], - name: name, - unit: unit, - value: value, + name: *item.Name, + unit: *item.Unit, + value: *item.Value, } - s.rawMetrics = append(s.rawMetrics, rawMetric) + uniqueRaw[rawMetric] = struct{}{} } } } + // extract raw metric structs + keys := make([]bouncerMetricItem, 0, len(uniqueRaw)) + for key := range uniqueRaw { + keys = append(keys, key) + } + + // order them by timestamp + sort.Slice(keys, func(i, j int) bool { + return keys[i].collectedAt.Before(keys[j].collectedAt) + }) + + s.rawMetrics = keys + s.aggregate() return nil @@ -358,7 +376,7 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor title = fmt.Sprintf("%s (%s)", title, bouncerName) if s.oldestTS != nil { - // if we change this to .Local() beware of tests + // if you change this to .Local() beware of tests title = fmt.Sprintf("%s since %s", title, s.oldestTS[bouncerName].String()) } diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go index a33b6e033fd..74f27bb6cf4 100644 --- a/pkg/apiserver/controllers/v1/usagemetrics.go +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -61,7 +61,6 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { var ( generatedType metric.GeneratedType generatedBy string - collectedAt time.Time ) bouncer, _ := getBouncerFromContext(gctx) @@ -181,13 +180,6 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { return } - if baseMetrics.Metrics != nil && len(baseMetrics.Metrics) > 0 { - collectedAt = time.Unix(*baseMetrics.Metrics[0].Meta.UtcNowTimestamp, 0).UTC() - } else { - // if there's no timestamp, use the current time - collectedAt = time.Now().UTC() - } - 
jsonPayload, err := json.Marshal(payload) if err != nil { logger.Errorf("Failed to marshal usage metrics: %s", err) @@ -196,7 +188,9 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { return } - if _, err := c.DBClient.CreateMetric(generatedType, generatedBy, collectedAt, string(jsonPayload)); err != nil { + receivedAt := time.Now().UTC() + + if _, err := c.DBClient.CreateMetric(generatedType, generatedBy, receivedAt, string(jsonPayload)); err != nil { logger.Error(err) c.HandleDBErrors(gctx, err) diff --git a/pkg/database/ent/metric.go b/pkg/database/ent/metric.go index 236d54da25d..47f3b4df4e5 100644 --- a/pkg/database/ent/metric.go +++ b/pkg/database/ent/metric.go @@ -22,8 +22,8 @@ type Metric struct { // Source of the metrics: machine id, bouncer name... // It must come from the auth middleware. GeneratedBy string `json:"generated_by,omitempty"` - // When the metrics are collected/calculated at the source - CollectedAt time.Time `json:"collected_at,omitempty"` + // When the metrics are received by LAPI + ReceivedAt time.Time `json:"received_at,omitempty"` // When the metrics are sent to the console PushedAt *time.Time `json:"pushed_at,omitempty"` // The actual metrics (item0) @@ -40,7 +40,7 @@ func (*Metric) scanValues(columns []string) ([]any, error) { values[i] = new(sql.NullInt64) case metric.FieldGeneratedType, metric.FieldGeneratedBy, metric.FieldPayload: values[i] = new(sql.NullString) - case metric.FieldCollectedAt, metric.FieldPushedAt: + case metric.FieldReceivedAt, metric.FieldPushedAt: values[i] = new(sql.NullTime) default: values[i] = new(sql.UnknownType) @@ -75,11 +75,11 @@ func (m *Metric) assignValues(columns []string, values []any) error { } else if value.Valid { m.GeneratedBy = value.String } - case metric.FieldCollectedAt: + case metric.FieldReceivedAt: if value, ok := values[i].(*sql.NullTime); !ok { - return fmt.Errorf("unexpected type %T for field collected_at", values[i]) + return fmt.Errorf("unexpected type %T for field 
received_at", values[i]) } else if value.Valid { - m.CollectedAt = value.Time + m.ReceivedAt = value.Time } case metric.FieldPushedAt: if value, ok := values[i].(*sql.NullTime); !ok { @@ -136,8 +136,8 @@ func (m *Metric) String() string { builder.WriteString("generated_by=") builder.WriteString(m.GeneratedBy) builder.WriteString(", ") - builder.WriteString("collected_at=") - builder.WriteString(m.CollectedAt.Format(time.ANSIC)) + builder.WriteString("received_at=") + builder.WriteString(m.ReceivedAt.Format(time.ANSIC)) builder.WriteString(", ") if v := m.PushedAt; v != nil { builder.WriteString("pushed_at=") diff --git a/pkg/database/ent/metric/metric.go b/pkg/database/ent/metric/metric.go index 879f1006d64..78e88982220 100644 --- a/pkg/database/ent/metric/metric.go +++ b/pkg/database/ent/metric/metric.go @@ -17,8 +17,8 @@ const ( FieldGeneratedType = "generated_type" // FieldGeneratedBy holds the string denoting the generated_by field in the database. FieldGeneratedBy = "generated_by" - // FieldCollectedAt holds the string denoting the collected_at field in the database. - FieldCollectedAt = "collected_at" + // FieldReceivedAt holds the string denoting the received_at field in the database. + FieldReceivedAt = "received_at" // FieldPushedAt holds the string denoting the pushed_at field in the database. FieldPushedAt = "pushed_at" // FieldPayload holds the string denoting the payload field in the database. @@ -32,7 +32,7 @@ var Columns = []string{ FieldID, FieldGeneratedType, FieldGeneratedBy, - FieldCollectedAt, + FieldReceivedAt, FieldPushedAt, FieldPayload, } @@ -88,9 +88,9 @@ func ByGeneratedBy(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldGeneratedBy, opts...).ToFunc() } -// ByCollectedAt orders the results by the collected_at field. -func ByCollectedAt(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldCollectedAt, opts...).ToFunc() +// ByReceivedAt orders the results by the received_at field. 
+func ByReceivedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldReceivedAt, opts...).ToFunc() } // ByPushedAt orders the results by the pushed_at field. diff --git a/pkg/database/ent/metric/where.go b/pkg/database/ent/metric/where.go index e49f80f3411..72bd9d93cd7 100644 --- a/pkg/database/ent/metric/where.go +++ b/pkg/database/ent/metric/where.go @@ -59,9 +59,9 @@ func GeneratedBy(v string) predicate.Metric { return predicate.Metric(sql.FieldEQ(FieldGeneratedBy, v)) } -// CollectedAt applies equality check predicate on the "collected_at" field. It's identical to CollectedAtEQ. -func CollectedAt(v time.Time) predicate.Metric { - return predicate.Metric(sql.FieldEQ(FieldCollectedAt, v)) +// ReceivedAt applies equality check predicate on the "received_at" field. It's identical to ReceivedAtEQ. +func ReceivedAt(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldReceivedAt, v)) } // PushedAt applies equality check predicate on the "pushed_at" field. It's identical to PushedAtEQ. @@ -159,44 +159,44 @@ func GeneratedByContainsFold(v string) predicate.Metric { return predicate.Metric(sql.FieldContainsFold(FieldGeneratedBy, v)) } -// CollectedAtEQ applies the EQ predicate on the "collected_at" field. -func CollectedAtEQ(v time.Time) predicate.Metric { - return predicate.Metric(sql.FieldEQ(FieldCollectedAt, v)) +// ReceivedAtEQ applies the EQ predicate on the "received_at" field. +func ReceivedAtEQ(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldReceivedAt, v)) } -// CollectedAtNEQ applies the NEQ predicate on the "collected_at" field. -func CollectedAtNEQ(v time.Time) predicate.Metric { - return predicate.Metric(sql.FieldNEQ(FieldCollectedAt, v)) +// ReceivedAtNEQ applies the NEQ predicate on the "received_at" field. 
+func ReceivedAtNEQ(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldNEQ(FieldReceivedAt, v)) } -// CollectedAtIn applies the In predicate on the "collected_at" field. -func CollectedAtIn(vs ...time.Time) predicate.Metric { - return predicate.Metric(sql.FieldIn(FieldCollectedAt, vs...)) +// ReceivedAtIn applies the In predicate on the "received_at" field. +func ReceivedAtIn(vs ...time.Time) predicate.Metric { + return predicate.Metric(sql.FieldIn(FieldReceivedAt, vs...)) } -// CollectedAtNotIn applies the NotIn predicate on the "collected_at" field. -func CollectedAtNotIn(vs ...time.Time) predicate.Metric { - return predicate.Metric(sql.FieldNotIn(FieldCollectedAt, vs...)) +// ReceivedAtNotIn applies the NotIn predicate on the "received_at" field. +func ReceivedAtNotIn(vs ...time.Time) predicate.Metric { + return predicate.Metric(sql.FieldNotIn(FieldReceivedAt, vs...)) } -// CollectedAtGT applies the GT predicate on the "collected_at" field. -func CollectedAtGT(v time.Time) predicate.Metric { - return predicate.Metric(sql.FieldGT(FieldCollectedAt, v)) +// ReceivedAtGT applies the GT predicate on the "received_at" field. +func ReceivedAtGT(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldGT(FieldReceivedAt, v)) } -// CollectedAtGTE applies the GTE predicate on the "collected_at" field. -func CollectedAtGTE(v time.Time) predicate.Metric { - return predicate.Metric(sql.FieldGTE(FieldCollectedAt, v)) +// ReceivedAtGTE applies the GTE predicate on the "received_at" field. +func ReceivedAtGTE(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldGTE(FieldReceivedAt, v)) } -// CollectedAtLT applies the LT predicate on the "collected_at" field. -func CollectedAtLT(v time.Time) predicate.Metric { - return predicate.Metric(sql.FieldLT(FieldCollectedAt, v)) +// ReceivedAtLT applies the LT predicate on the "received_at" field. 
+func ReceivedAtLT(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldLT(FieldReceivedAt, v)) } -// CollectedAtLTE applies the LTE predicate on the "collected_at" field. -func CollectedAtLTE(v time.Time) predicate.Metric { - return predicate.Metric(sql.FieldLTE(FieldCollectedAt, v)) +// ReceivedAtLTE applies the LTE predicate on the "received_at" field. +func ReceivedAtLTE(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldLTE(FieldReceivedAt, v)) } // PushedAtEQ applies the EQ predicate on the "pushed_at" field. diff --git a/pkg/database/ent/metric_create.go b/pkg/database/ent/metric_create.go index 8fa656db427..973cddd41d0 100644 --- a/pkg/database/ent/metric_create.go +++ b/pkg/database/ent/metric_create.go @@ -32,9 +32,9 @@ func (mc *MetricCreate) SetGeneratedBy(s string) *MetricCreate { return mc } -// SetCollectedAt sets the "collected_at" field. -func (mc *MetricCreate) SetCollectedAt(t time.Time) *MetricCreate { - mc.mutation.SetCollectedAt(t) +// SetReceivedAt sets the "received_at" field. 
+func (mc *MetricCreate) SetReceivedAt(t time.Time) *MetricCreate { + mc.mutation.SetReceivedAt(t) return mc } @@ -103,8 +103,8 @@ func (mc *MetricCreate) check() error { if _, ok := mc.mutation.GeneratedBy(); !ok { return &ValidationError{Name: "generated_by", err: errors.New(`ent: missing required field "Metric.generated_by"`)} } - if _, ok := mc.mutation.CollectedAt(); !ok { - return &ValidationError{Name: "collected_at", err: errors.New(`ent: missing required field "Metric.collected_at"`)} + if _, ok := mc.mutation.ReceivedAt(); !ok { + return &ValidationError{Name: "received_at", err: errors.New(`ent: missing required field "Metric.received_at"`)} } if _, ok := mc.mutation.Payload(); !ok { return &ValidationError{Name: "payload", err: errors.New(`ent: missing required field "Metric.payload"`)} @@ -143,9 +143,9 @@ func (mc *MetricCreate) createSpec() (*Metric, *sqlgraph.CreateSpec) { _spec.SetField(metric.FieldGeneratedBy, field.TypeString, value) _node.GeneratedBy = value } - if value, ok := mc.mutation.CollectedAt(); ok { - _spec.SetField(metric.FieldCollectedAt, field.TypeTime, value) - _node.CollectedAt = value + if value, ok := mc.mutation.ReceivedAt(); ok { + _spec.SetField(metric.FieldReceivedAt, field.TypeTime, value) + _node.ReceivedAt = value } if value, ok := mc.mutation.PushedAt(); ok { _spec.SetField(metric.FieldPushedAt, field.TypeTime, value) diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index 60bf72a486b..986f5bc8c67 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -254,7 +254,7 @@ var ( {Name: "id", Type: field.TypeInt, Increment: true}, {Name: "generated_type", Type: field.TypeEnum, Enums: []string{"LP", "RC"}}, {Name: "generated_by", Type: field.TypeString}, - {Name: "collected_at", Type: field.TypeTime}, + {Name: "received_at", Type: field.TypeTime}, {Name: "pushed_at", Type: field.TypeTime, Nullable: true}, {Name: "payload", Type: field.TypeString, Size: 
2147483647}, } @@ -263,13 +263,6 @@ var ( Name: "metrics", Columns: MetricsColumns, PrimaryKey: []*schema.Column{MetricsColumns[0]}, - Indexes: []*schema.Index{ - { - Name: "metric_generated_type_generated_by_collected_at", - Unique: true, - Columns: []*schema.Column{MetricsColumns[1], MetricsColumns[2], MetricsColumns[3]}, - }, - }, } // Tables holds all the tables in the schema. Tables = []*schema.Table{ diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index 5b70457c512..5c6596f3db4 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -8640,7 +8640,7 @@ type MetricMutation struct { id *int generated_type *metric.GeneratedType generated_by *string - collected_at *time.Time + received_at *time.Time pushed_at *time.Time payload *string clearedFields map[string]struct{} @@ -8819,40 +8819,40 @@ func (m *MetricMutation) ResetGeneratedBy() { m.generated_by = nil } -// SetCollectedAt sets the "collected_at" field. -func (m *MetricMutation) SetCollectedAt(t time.Time) { - m.collected_at = &t +// SetReceivedAt sets the "received_at" field. +func (m *MetricMutation) SetReceivedAt(t time.Time) { + m.received_at = &t } -// CollectedAt returns the value of the "collected_at" field in the mutation. -func (m *MetricMutation) CollectedAt() (r time.Time, exists bool) { - v := m.collected_at +// ReceivedAt returns the value of the "received_at" field in the mutation. +func (m *MetricMutation) ReceivedAt() (r time.Time, exists bool) { + v := m.received_at if v == nil { return } return *v, true } -// OldCollectedAt returns the old "collected_at" field's value of the Metric entity. +// OldReceivedAt returns the old "received_at" field's value of the Metric entity. // If the Metric object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *MetricMutation) OldCollectedAt(ctx context.Context) (v time.Time, err error) { +func (m *MetricMutation) OldReceivedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldCollectedAt is only allowed on UpdateOne operations") + return v, errors.New("OldReceivedAt is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldCollectedAt requires an ID field in the mutation") + return v, errors.New("OldReceivedAt requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldCollectedAt: %w", err) + return v, fmt.Errorf("querying old value for OldReceivedAt: %w", err) } - return oldValue.CollectedAt, nil + return oldValue.ReceivedAt, nil } -// ResetCollectedAt resets all changes to the "collected_at" field. -func (m *MetricMutation) ResetCollectedAt() { - m.collected_at = nil +// ResetReceivedAt resets all changes to the "received_at" field. +func (m *MetricMutation) ResetReceivedAt() { + m.received_at = nil } // SetPushedAt sets the "pushed_at" field. 
@@ -8981,8 +8981,8 @@ func (m *MetricMutation) Fields() []string { if m.generated_by != nil { fields = append(fields, metric.FieldGeneratedBy) } - if m.collected_at != nil { - fields = append(fields, metric.FieldCollectedAt) + if m.received_at != nil { + fields = append(fields, metric.FieldReceivedAt) } if m.pushed_at != nil { fields = append(fields, metric.FieldPushedAt) @@ -9002,8 +9002,8 @@ func (m *MetricMutation) Field(name string) (ent.Value, bool) { return m.GeneratedType() case metric.FieldGeneratedBy: return m.GeneratedBy() - case metric.FieldCollectedAt: - return m.CollectedAt() + case metric.FieldReceivedAt: + return m.ReceivedAt() case metric.FieldPushedAt: return m.PushedAt() case metric.FieldPayload: @@ -9021,8 +9021,8 @@ func (m *MetricMutation) OldField(ctx context.Context, name string) (ent.Value, return m.OldGeneratedType(ctx) case metric.FieldGeneratedBy: return m.OldGeneratedBy(ctx) - case metric.FieldCollectedAt: - return m.OldCollectedAt(ctx) + case metric.FieldReceivedAt: + return m.OldReceivedAt(ctx) case metric.FieldPushedAt: return m.OldPushedAt(ctx) case metric.FieldPayload: @@ -9050,12 +9050,12 @@ func (m *MetricMutation) SetField(name string, value ent.Value) error { } m.SetGeneratedBy(v) return nil - case metric.FieldCollectedAt: + case metric.FieldReceivedAt: v, ok := value.(time.Time) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetCollectedAt(v) + m.SetReceivedAt(v) return nil case metric.FieldPushedAt: v, ok := value.(time.Time) @@ -9135,8 +9135,8 @@ func (m *MetricMutation) ResetField(name string) error { case metric.FieldGeneratedBy: m.ResetGeneratedBy() return nil - case metric.FieldCollectedAt: - m.ResetCollectedAt() + case metric.FieldReceivedAt: + m.ResetReceivedAt() return nil case metric.FieldPushedAt: m.ResetPushedAt() diff --git a/pkg/database/ent/schema/metric.go b/pkg/database/ent/schema/metric.go index b47da78bdf3..319c67b7aa7 100644 --- a/pkg/database/ent/schema/metric.go +++ 
b/pkg/database/ent/schema/metric.go @@ -3,7 +3,6 @@ package schema import ( "entgo.io/ent" "entgo.io/ent/schema/field" - "entgo.io/ent/schema/index" ) // Metric is actually a set of metrics collected by a device @@ -21,9 +20,9 @@ func (Metric) Fields() []ent.Field { field.String("generated_by"). Immutable(). Comment("Source of the metrics: machine id, bouncer name...\nIt must come from the auth middleware."), - field.Time("collected_at"). + field.Time("received_at"). Immutable(). - Comment("When the metrics are collected/calculated at the source"), + Comment("When the metrics are received by LAPI"), field.Time("pushed_at"). Nillable(). Optional(). @@ -33,11 +32,3 @@ func (Metric) Fields() []ent.Field { Comment("The actual metrics (item0)"), } } - -func (Metric) Indexes() []ent.Index { - return []ent.Index{ - // Don't store the same metrics multiple times. - index.Fields("generated_type", "generated_by", "collected_at"). - Unique(), - } -} diff --git a/pkg/database/flush.go b/pkg/database/flush.go index 580235bc8f1..5d53d10c942 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -128,7 +128,7 @@ func (c *Client) flushMetrics(maxAge *time.Duration) { c.Log.Debugf("flushing metrics older than %s", maxAge) deleted, err := c.Ent.Metric.Delete().Where( - metric.CollectedAtLTE(time.Now().UTC().Add(-*maxAge)), + metric.ReceivedAtLTE(time.Now().UTC().Add(-*maxAge)), ).Exec(c.CTX) if err != nil { c.Log.Errorf("while flushing metrics: %s", err) diff --git a/pkg/database/metrics.go b/pkg/database/metrics.go index 8c2c890e490..3bc5e7b5d32 100644 --- a/pkg/database/metrics.go +++ b/pkg/database/metrics.go @@ -8,45 +8,22 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" ) -func (c *Client) CreateMetric(generatedType metric.GeneratedType, generatedBy string, collectedAt time.Time, payload string) (*ent.Metric, error) { +func (c *Client) CreateMetric(generatedType metric.GeneratedType, generatedBy string, receivedAt time.Time, payload string) 
(*ent.Metric, error) { metric, err := c.Ent.Metric. Create(). SetGeneratedType(generatedType). SetGeneratedBy(generatedBy). - SetCollectedAt(collectedAt). + SetReceivedAt(receivedAt). SetPayload(payload). Save(c.CTX) - - switch { - case ent.IsConstraintError(err): - // pretty safe guess, it's the unique index - c.Log.Infof("storing metrics snapshot for '%s' at %s: already exists", generatedBy, collectedAt) - // it's polite to accept a duplicate snapshot without any error - return nil, nil - case err != nil: + if err != nil { c.Log.Warningf("CreateMetric: %s", err) - return nil, fmt.Errorf("storing metrics snapshot for '%s' at %s: %w", generatedBy, collectedAt, InsertFail) + return nil, fmt.Errorf("storing metrics snapshot for '%s' at %s: %w", generatedBy, receivedAt, InsertFail) } return metric, nil } -func (c *Client) GetLPsUsageMetrics() ([]*ent.Metric, error) { - metrics, err := c.Ent.Metric.Query(). - Where( - metric.GeneratedTypeEQ(metric.GeneratedTypeLP), - metric.PushedAtIsNil(), - ). - Order(ent.Desc(metric.FieldCollectedAt)). - All(c.CTX) - if err != nil { - c.Log.Warningf("GetLPsUsageMetrics: %s", err) - return nil, fmt.Errorf("getting LPs usage metrics: %w", err) - } - - return metrics, nil -} - func (c *Client) GetLPUsageMetricsByMachineID(machineId string) ([]*ent.Metric, error) { metrics, err := c.Ent.Metric.Query(). Where( @@ -54,7 +31,8 @@ func (c *Client) GetLPUsageMetricsByMachineID(machineId string) ([]*ent.Metric, metric.GeneratedByEQ(machineId), metric.PushedAtIsNil(), ). - Order(ent.Desc(metric.FieldCollectedAt)). + // XXX: do we need to sort? + Order(ent.Desc(metric.FieldReceivedAt)). All(c.CTX) if err != nil { c.Log.Warningf("GetLPUsageMetricsByOrigin: %s", err) @@ -64,22 +42,6 @@ func (c *Client) GetLPUsageMetricsByMachineID(machineId string) ([]*ent.Metric, return metrics, nil } -func (c *Client) GetBouncersUsageMetrics() ([]*ent.Metric, error) { - metrics, err := c.Ent.Metric.Query(). 
- Where( - metric.GeneratedTypeEQ(metric.GeneratedTypeRC), - metric.PushedAtIsNil(), - ). - Order(ent.Desc(metric.FieldCollectedAt)). - All(c.CTX) - if err != nil { - c.Log.Warningf("GetBouncersUsageMetrics: %s", err) - return nil, fmt.Errorf("getting bouncers usage metrics: %w", err) - } - - return metrics, nil -} - func (c *Client) GetBouncerUsageMetricsByName(bouncerName string) ([]*ent.Metric, error) { metrics, err := c.Ent.Metric.Query(). Where( @@ -87,7 +49,7 @@ func (c *Client) GetBouncerUsageMetricsByName(bouncerName string) ([]*ent.Metric metric.GeneratedByEQ(bouncerName), metric.PushedAtIsNil(), ). - Order(ent.Desc(metric.FieldCollectedAt)). + Order(ent.Desc(metric.FieldReceivedAt)). All(c.CTX) if err != nil { c.Log.Warningf("GetBouncerUsageMetricsByName: %s", err) diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index fae0b6b060c..d17d48516c1 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -322,21 +322,6 @@ teardown() { EOT ) - payload=$(yq -o j ' - .remediation_components[0].metrics = [ - { - "meta": {"utc_now_timestamp": 1707450000, "window_size_seconds":600}, - "items":[ - {"name": "ima_gauge", "unit": "second", "value": 20, "labels": {"origin": "cscli"}}, - {"name": "notagauge", "unit": "inch", "value": 10, "labels": {"origin": "cscli"}} - ] - } - ] | - .remediation_components[0].type = "crowdsec-firewall-bouncer" - ' <<<"$payload") - - rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" - payload=$(yq -o j ' .remediation_components[0].metrics = [ { @@ -345,6 +330,12 @@ teardown() { {"name": "ima_gauge", "unit": "second", "value": 30, "labels": {"origin": "cscli"}}, {"name": "notagauge", "unit": "inch", "value": 15, "labels": {"origin": "cscli"}} ] + }, { + "meta": {"utc_now_timestamp": 1707450000, "window_size_seconds":600}, + "items":[ + {"name": "ima_gauge", "unit": "second", "value": 20, "labels": {"origin": "cscli"}}, + {"name": "notagauge", "unit": 
"inch", "value": 10, "labels": {"origin": "cscli"}} + ] } ] | .remediation_components[0].type = "crowdsec-firewall-bouncer" From 7d790fe139f1a28f8bee0848e0c2f6667f17281f Mon Sep 17 00:00:00 2001 From: marco Date: Tue, 23 Jul 2024 12:43:37 +0200 Subject: [PATCH 108/119] collection timestamp can't be nil so warn --- cmd/crowdsec-cli/climetrics/statbouncer.go | 24 ++++++++++------------ test/bats/08_metrics_bouncer.bats | 12 +++++++++++ 2 files changed, 23 insertions(+), 13 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index d1cf9dd5bdb..f8cfca0f642 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -108,22 +108,20 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { } for _, m := range payload.Metrics { - for _, item := range m.Items { - labels := item.Labels + // fields like timestamp, name, etc. are mandatory but we got pointers, so we check anyway + if m.Meta.UtcNowTimestamp == nil { + warnOnce(warningsLogged, "missing 'utc_now_timestamp' field in metrics reported by "+bouncerName) + continue + } - var collectedAt time.Time - if m.Meta.UtcNowTimestamp != nil { - // if one of these timestamps is nil, we'll just keep the zero value (!!) - // and no timestamp will be displayed in the table title - // maybe a warning is in order, and skip like we do with name etc ? - collectedAt = time.Unix(*m.Meta.UtcNowTimestamp, 0).UTC() - } + collectedAt := time.Unix(*m.Meta.UtcNowTimestamp, 0).UTC() - if s.oldestTS[bouncerName].IsZero() || collectedAt.Before(s.oldestTS[bouncerName]) { - s.oldestTS[bouncerName] = collectedAt - } + if s.oldestTS[bouncerName].IsZero() || collectedAt.Before(s.oldestTS[bouncerName]) { + s.oldestTS[bouncerName] = collectedAt + } - // these are mandatory but we got pointers, so... 
+ for _, item := range m.Items { + labels := item.Labels valid := true diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index d17d48516c1..6c4bc26b935 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -74,6 +74,18 @@ teardown() { payload=$(yq -o j '.remediation_components[0].utc_startup_timestamp = 1707399316' <<<"$payload") rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" refute_output + + payload=$(yq -o j '.remediation_components[0].metrics = [{"meta": {}}]' <<<"$payload") + rune -22 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + assert_stderr --partial "error: 422" + rune -0 jq -r '.message' <(output) + assert_output - <<-EOT + validation failure list: + remediation_components.0.metrics.0.items in body is required + validation failure list: + remediation_components.0.metrics.0.meta.utc_now_timestamp in body is required + remediation_components.0.metrics.0.meta.window_size_seconds in body is required + EOT } @test "rc usage metrics (good payload)" { From 5517acf897e2d4444d3ca89f676b2c28d84ca4b2 Mon Sep 17 00:00:00 2001 From: marco Date: Tue, 23 Jul 2024 15:06:30 +0200 Subject: [PATCH 109/119] always aggregate over ipv4+ipv6 (even in json output) --- cmd/crowdsec-cli/climetrics/statbouncer.go | 78 ++++++++++++++++------ test/bats/08_metrics_bouncer.bats | 57 ++++++++++++++-- 2 files changed, 112 insertions(+), 23 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index f8cfca0f642..3fb8e234249 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -38,7 +38,11 @@ type statBouncer struct { // we keep de-normalized metrics so we can iterate // over them multiple times and keep the aggregation code simple rawMetrics []bouncerMetricItem - aggregated map[string]map[string]map[string]map[string]int64 + // [bouncer][origin][name][unit][iptype]value + 
aggregated map[string]map[string]map[string]map[string]map[string]int64 + // [bouncer][origin][name][unit]value + aggregatedAllIPType map[string]map[string]map[string]map[string]int64 + // [bouncer][name][unit]value aggregatedAllOrigin map[string]map[string]map[string]int64 } @@ -49,7 +53,7 @@ var knownPlurals = map[string]string{ } func (s *statBouncer) MarshalJSON() ([]byte, error) { - return json.Marshal(s.aggregated) + return json.Marshal(s.aggregatedAllIPType) } func (s *statBouncer) Description() (string, string) { @@ -203,55 +207,91 @@ func (statBouncer) formatMetricOrigin(origin string) string { } func (s *statBouncer) aggregate() { - // [bouncer][origin][name][unit]value + // [bouncer][origin][name][unit][iptype]value if s.aggregated == nil { - s.aggregated = make(map[string]map[string]map[string]map[string]int64) + s.aggregated = make(map[string]map[string]map[string]map[string]map[string]int64) } if s.aggregatedAllOrigin == nil { s.aggregatedAllOrigin = make(map[string]map[string]map[string]int64) } + if s.aggregatedAllIPType == nil { + s.aggregatedAllIPType = make(map[string]map[string]map[string]map[string]int64) + } + + // first round, we aggregate over time if the metric is not of type "gauge" + for _, raw := range s.rawMetrics { if _, ok := s.aggregated[raw.bouncerName]; !ok { - s.aggregated[raw.bouncerName] = make(map[string]map[string]map[string]int64) + s.aggregated[raw.bouncerName] = make(map[string]map[string]map[string]map[string]int64) } if _, ok := s.aggregated[raw.bouncerName][raw.origin]; !ok { - s.aggregated[raw.bouncerName][raw.origin] = make(map[string]map[string]int64) + s.aggregated[raw.bouncerName][raw.origin] = make(map[string]map[string]map[string]int64) } if _, ok := s.aggregated[raw.bouncerName][raw.origin][raw.name]; !ok { - s.aggregated[raw.bouncerName][raw.origin][raw.name] = make(map[string]int64) + s.aggregated[raw.bouncerName][raw.origin][raw.name] = make(map[string]map[string]int64) } if _, ok := 
s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit]; !ok { - s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] = 0 + s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] = make(map[string]int64) + } + + if _, ok := s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit][raw.ipType]; !ok { + s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit][raw.ipType] = 0 } if s.isGauge(raw.name) { - s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] = int64(raw.value) + s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit][raw.ipType] = int64(raw.value) } else { - s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] += int64(raw.value) + s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit][raw.ipType] += int64(raw.value) } } + // second round, we always aggregate over ip type + for bouncerName := range s.aggregated { - if _, ok := s.aggregatedAllOrigin[bouncerName]; !ok { - s.aggregatedAllOrigin[bouncerName] = make(map[string]map[string]int64) + if _, ok := s.aggregatedAllIPType[bouncerName]; !ok { + s.aggregatedAllIPType[bouncerName] = make(map[string]map[string]map[string]int64) } for origin := range s.aggregated[bouncerName] { + if _, ok := s.aggregatedAllIPType[bouncerName][origin]; !ok { + s.aggregatedAllIPType[bouncerName][origin] = make(map[string]map[string]int64) + } for name := range s.aggregated[bouncerName][origin] { - if _, ok := s.aggregatedAllOrigin[bouncerName][name]; !ok { - s.aggregatedAllOrigin[bouncerName][name] = make(map[string]int64) + if _, ok := s.aggregatedAllIPType[bouncerName][origin][name]; !ok { + s.aggregatedAllIPType[bouncerName][origin][name] = make(map[string]int64) } for unit := range s.aggregated[bouncerName][origin][name] { - if _, ok := s.aggregatedAllOrigin[bouncerName][name][unit]; !ok { - s.aggregatedAllOrigin[bouncerName][name][unit] = 0 + if _, ok := s.aggregatedAllIPType[bouncerName][origin][name][unit]; !ok { + 
s.aggregatedAllIPType[bouncerName][origin][name][unit] = 0 } - value := s.aggregated[bouncerName][origin][name][unit] - s.aggregatedAllOrigin[bouncerName][name][unit] += value + for ipType := range s.aggregated[bouncerName][origin][name][unit] { + value := s.aggregated[bouncerName][origin][name][unit][ipType] + s.aggregatedAllIPType[bouncerName][origin][name][unit] += value + } + } + } + } + } + + // third round, we always aggregate over origin + + for bouncerName := range s.aggregatedAllIPType { + if _, ok := s.aggregatedAllOrigin[bouncerName]; !ok { + s.aggregatedAllOrigin[bouncerName] = make(map[string]map[string]int64) + } + for origin := range s.aggregatedAllIPType[bouncerName] { + for name := range s.aggregatedAllIPType[bouncerName][origin] { + if _, ok := s.aggregatedAllOrigin[bouncerName][name]; !ok { + s.aggregatedAllOrigin[bouncerName][name] = make(map[string]int64) + } + for unit := range s.aggregatedAllIPType[bouncerName][origin][name] { + val := s.aggregatedAllIPType[bouncerName][origin][name][unit] + s.aggregatedAllOrigin[bouncerName][name][unit] += val } } } @@ -332,7 +372,7 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor continue } - metrics := s.aggregated[bouncerName][origin] + metrics := s.aggregatedAllIPType[bouncerName][origin] row := table.Row{s.formatMetricOrigin(origin)} diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 6c4bc26b935..1851ed0ac14 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -127,7 +127,7 @@ teardown() { rune -0 cscli metrics show bouncers -o json # aggregation is ok -- we are truncating, not rounding, because the float is mandated by swagger. 
# but without labels the origin string is empty - assert_json '{bouncers:{testbouncer:{"": {"foo": {"dogyear": 2, "pound": 5}}}}}' + assert_json '{bouncers:{testbouncer:{"": {foo: {dogyear: 2, pound: 5}}}}}' rune -0 cscli metrics show bouncers assert_output - <<-EOT @@ -148,7 +148,7 @@ teardown() { { "meta": {"utc_now_timestamp": 1707399916, "window_size_seconds":600}, "items":[ - {"name": "active_decisions", "unit": "ip", "value": 500, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_voipbl"}}, + {"name": "active_decisions", "unit": "ip", "value": 500, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_voipbl"}}, {"name": "active_decisions", "unit": "ip", "value": 1, "labels": {"ip_type": "ipv6", "origin": "cscli"}}, {"name": "dropped", "unit": "byte", "value": 3800, "labels": {"ip_type": "ipv4", "origin": "CAPI"}}, {"name": "dropped", "unit": "byte", "value": 0, "labels": {"ip_type": "ipv4", "origin": "cscli"}}, @@ -244,7 +244,7 @@ teardown() { "meta": {"utc_now_timestamp": 1707450000, "window_size_seconds":600}, "items":[ {"name": "active_decisions", "unit": "ip", "value": 250, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_voipbl"}}, - {"name": "active_decisions", "unit": "ip", "value": 10, "labels": {"ip_type": "ipv6", "origin": "cscli"}} + {"name": "active_decisions", "unit": "ip", "value": 10, "labels": {"ip_type": "ipv6", "origin": "cscli"}} ] } ] | @@ -370,9 +370,58 @@ teardown() { | Total | 30 | 25 | +--------------------------+--------+-----------+ EOT +} + +@test "rc usage metrics (ipv4/ipv6)" { + # gauge metrics are not aggregated over time, but they are over ip type + + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY - # TODO: multiple item lists + payload=$(yq -o j <<-EOT + remediation_components: + - version: "v1.0" + utc_startup_timestamp: 1707369316 + log_processors: [] + EOT + ) + payload=$(yq -o j ' + .remediation_components[0].metrics = [ + { + "meta": {"utc_now_timestamp": 1707460000, 
"window_size_seconds":600}, + "items":[ + {"name": "active_decisions", "unit": "ip", "value": 200, "labels": {"ip_type": "ipv4", "origin": "cscli"}}, + {"name": "active_decisions", "unit": "ip", "value": 30, "labels": {"ip_type": "ipv6", "origin": "cscli"}} + ] + }, { + "meta": {"utc_now_timestamp": 1707450000, "window_size_seconds":600}, + "items":[ + {"name": "active_decisions", "unit": "ip", "value": 400, "labels": {"ip_type": "ipv4", "origin": "cscli"}}, + {"name": "active_decisions", "unit": "ip", "value": 50, "labels": {"ip_type": "ipv6", "origin": "cscli"}} + ] + } + ] | + .remediation_components[0].type = "crowdsec-firewall-bouncer" + ' <<<"$payload") + + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + + rune -0 cscli metrics show bouncers -o json + assert_json '{bouncers: {testbouncer: {cscli: {active_decisions: {ip: 230}}}}}' + + rune -0 cscli metrics show bouncers + assert_output - <<-EOT + Bouncer Metrics (testbouncer) since 2024-02-09 03:40:00 +0000 UTC: + +--------------------------+------------------+ + | Origin | active_decisions | + | | IPs | + +--------------------------+------------------+ + | cscli (manual decisions) | 230 | + +--------------------------+------------------+ + | Total | 230 | + +--------------------------+------------------+ + EOT } @test "rc usage metrics (multiple bouncers)" { From c68646bfadfa2036ef228fdda9fec1bcc7952666 Mon Sep 17 00:00:00 2001 From: marco Date: Tue, 23 Jul 2024 15:38:31 +0200 Subject: [PATCH 110/119] extract method extractRawMetrics() --- cmd/crowdsec-cli/climetrics/statbouncer.go | 68 ++++++++++++---------- 1 file changed, 37 insertions(+), 31 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 3fb8e234249..e1836a7a434 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -17,6 +17,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" 
"github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -61,7 +62,7 @@ func (s *statBouncer) Description() (string, string) { `Network traffic blocked by bouncers.` } -func warnOnce(warningsLogged map[string]bool, msg string) { +func logWarningOnce(warningsLogged map[string]bool, msg string) { if _, ok := warningsLogged[msg]; !ok { log.Warningf(msg) @@ -69,26 +70,8 @@ func warnOnce(warningsLogged map[string]bool, msg string) { } } -func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { - if db == nil { - return nil - } - - // query all bouncer metrics that have not been flushed - - metrics, err := db.Ent.Metric.Query(). - Where( - metric.GeneratedTypeEQ(metric.GeneratedTypeRC), - ). - // we will process metrics ordered by timestamp, so that active_decisions - // can override previous values - All(ctx) - if err != nil { - return fmt.Errorf("unable to fetch metrics: %w", err) - } - - // keep the oldest timestamp for each bouncer - s.oldestTS = make(map[string]time.Time, 0) +func (*statBouncer) extractRawMetrics(metrics []*ent.Metric) ([]bouncerMetricItem, map[string]time.Time) { + oldestTS := make(map[string]time.Time, 0) // don't spam the user with the same warnings warningsLogged := make(map[string]bool) @@ -114,14 +97,14 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { for _, m := range payload.Metrics { // fields like timestamp, name, etc. 
are mandatory but we got pointers, so we check anyway if m.Meta.UtcNowTimestamp == nil { - warnOnce(warningsLogged, "missing 'utc_now_timestamp' field in metrics reported by "+bouncerName) + logWarningOnce(warningsLogged, "missing 'utc_now_timestamp' field in metrics reported by "+bouncerName) continue } collectedAt := time.Unix(*m.Meta.UtcNowTimestamp, 0).UTC() - if s.oldestTS[bouncerName].IsZero() || collectedAt.Before(s.oldestTS[bouncerName]) { - s.oldestTS[bouncerName] = collectedAt + if oldestTS[bouncerName].IsZero() || collectedAt.Before(oldestTS[bouncerName]) { + oldestTS[bouncerName] = collectedAt } for _, item := range m.Items { @@ -130,18 +113,18 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { valid := true if item.Name == nil { - warnOnce(warningsLogged, "missing 'name' field in metrics reported by "+bouncerName) + logWarningOnce(warningsLogged, "missing 'name' field in metrics reported by "+bouncerName) // no continue - keep checking the rest valid = false } if item.Unit == nil { - warnOnce(warningsLogged, "missing 'unit' field in metrics reported by "+bouncerName) + logWarningOnce(warningsLogged, "missing 'unit' field in metrics reported by "+bouncerName) valid = false } if item.Value == nil { - warnOnce(warningsLogged, "missing 'value' field in metrics reported by "+bouncerName) + logWarningOnce(warningsLogged, "missing 'value' field in metrics reported by "+bouncerName) valid = false } @@ -175,7 +158,30 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { return keys[i].collectedAt.Before(keys[j].collectedAt) }) - s.rawMetrics = keys + return keys, oldestTS +} + +func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { + if db == nil { + return nil + } + + // query all bouncer metrics that have not been flushed + + metrics, err := db.Ent.Metric.Query(). + Where( + metric.GeneratedTypeEQ(metric.GeneratedTypeRC), + ). 
+ // we will process metrics ordered by timestamp, so that active_decisions + // can override previous values + All(ctx) + if err != nil { + return fmt.Errorf("unable to fetch metrics: %w", err) + } + + // de-normalize, de-duplicate metrics and keep the oldest timestamp for each bouncer + + s.rawMetrics, s.oldestTS = s.extractRawMetrics(metrics) s.aggregate() @@ -183,18 +189,18 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { } // return true if the metric is a gauge and should not be aggregated -func (statBouncer) isGauge(name string) bool { +func (*statBouncer) isGauge(name string) bool { return name == "active_decisions" || strings.HasSuffix(name, "_gauge") } // formatMetricName returns the metric name to display in the table header -func (statBouncer) formatMetricName(name string) string { +func (*statBouncer) formatMetricName(name string) string { return strings.TrimSuffix(name, "_gauge") } // formatMetricOrigin returns the origin to display in the table rows // (for example, some users don't know what capi is) -func (statBouncer) formatMetricOrigin(origin string) string { +func (*statBouncer) formatMetricOrigin(origin string) string { switch origin { case "CAPI": origin += " (community blocklist)" From 5b47ad3103fa2b6c64d50373ac851570ab96083c Mon Sep 17 00:00:00 2001 From: marco Date: Tue, 23 Jul 2024 16:00:48 +0200 Subject: [PATCH 111/119] extract aggegation methods --- cmd/crowdsec-cli/climetrics/statbouncer.go | 30 +++++++++++++--------- 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index e1836a7a434..5972faee553 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -70,6 +70,8 @@ func logWarningOnce(warningsLogged map[string]bool, msg string) { } } +// extractRawMetrics converts metrics from the database to a de-normalized, de-duplicated slice +// it returns the slice 
and the oldest timestamp for each bouncer func (*statBouncer) extractRawMetrics(metrics []*ent.Metric) ([]bouncerMetricItem, map[string]time.Time) { oldestTS := make(map[string]time.Time, 0) @@ -172,8 +174,6 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { Where( metric.GeneratedTypeEQ(metric.GeneratedTypeRC), ). - // we will process metrics ordered by timestamp, so that active_decisions - // can override previous values All(ctx) if err != nil { return fmt.Errorf("unable to fetch metrics: %w", err) @@ -183,7 +183,9 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { s.rawMetrics, s.oldestTS = s.extractRawMetrics(metrics) - s.aggregate() + s.aggregateOverTime() + s.aggregateOverIPType() + s.aggregateOverOrigin() return nil } @@ -212,20 +214,12 @@ func (*statBouncer) formatMetricOrigin(origin string) string { return origin } -func (s *statBouncer) aggregate() { +func (s *statBouncer) aggregateOverTime() { // [bouncer][origin][name][unit][iptype]value if s.aggregated == nil { s.aggregated = make(map[string]map[string]map[string]map[string]map[string]int64) } - if s.aggregatedAllOrigin == nil { - s.aggregatedAllOrigin = make(map[string]map[string]map[string]int64) - } - - if s.aggregatedAllIPType == nil { - s.aggregatedAllIPType = make(map[string]map[string]map[string]map[string]int64) - } - // first round, we aggregate over time if the metric is not of type "gauge" for _, raw := range s.rawMetrics { @@ -255,6 +249,12 @@ func (s *statBouncer) aggregate() { s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit][raw.ipType] += int64(raw.value) } } +} + +func (s *statBouncer) aggregateOverIPType() { + if s.aggregatedAllIPType == nil { + s.aggregatedAllIPType = make(map[string]map[string]map[string]map[string]int64) + } // second round, we always aggregate over ip type @@ -283,6 +283,12 @@ func (s *statBouncer) aggregate() { } } } +} + +func (s *statBouncer) aggregateOverOrigin() { + if 
s.aggregatedAllOrigin == nil { + s.aggregatedAllOrigin = make(map[string]map[string]map[string]int64) + } // third round, we always aggregate over origin From 1187184100f5cd38a54556d9d04a5c617cd6d538 Mon Sep 17 00:00:00 2001 From: marco Date: Tue, 23 Jul 2024 16:48:28 +0200 Subject: [PATCH 112/119] types for aggregated data --- cmd/crowdsec-cli/climetrics/statbouncer.go | 169 ++++++++++----------- 1 file changed, 84 insertions(+), 85 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 5972faee553..fa63900ba04 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -33,18 +33,19 @@ type bouncerMetricItem struct { value float64 } +type aggregationOverTime map[string]map[string]map[string]map[string]map[string]int64 +type aggregationOverIPType map[string]map[string]map[string]map[string]int64 +type aggregationOverOrigin map[string]map[string]map[string]int64 + type statBouncer struct { // oldest collection timestamp for each bouncer oldestTS map[string]time.Time - // we keep de-normalized metrics so we can iterate - // over them multiple times and keep the aggregation code simple - rawMetrics []bouncerMetricItem - // [bouncer][origin][name][unit][iptype]value - aggregated map[string]map[string]map[string]map[string]map[string]int64 + // aggregate over ip type: always sum // [bouncer][origin][name][unit]value - aggregatedAllIPType map[string]map[string]map[string]map[string]int64 + aggOverIPType aggregationOverIPType + // aggregate over origin: always sum // [bouncer][name][unit]value - aggregatedAllOrigin map[string]map[string]map[string]int64 + aggOverOrigin aggregationOverOrigin } var knownPlurals = map[string]string{ @@ -54,10 +55,10 @@ var knownPlurals = map[string]string{ } func (s *statBouncer) MarshalJSON() ([]byte, error) { - return json.Marshal(s.aggregatedAllIPType) + return json.Marshal(s.aggOverIPType) } -func (s *statBouncer) 
Description() (string, string) { +func (*statBouncer) Description() (string, string) { return "Bouncer Metrics", `Network traffic blocked by bouncers.` } @@ -181,11 +182,12 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { // de-normalize, de-duplicate metrics and keep the oldest timestamp for each bouncer - s.rawMetrics, s.oldestTS = s.extractRawMetrics(metrics) + rawMetrics, oldestTS := s.extractRawMetrics(metrics) - s.aggregateOverTime() - s.aggregateOverIPType() - s.aggregateOverOrigin() + s.oldestTS = oldestTS + aggOverTime := s.newAggregationOverTime(rawMetrics) + s.aggOverIPType = s.newAggregationOverIPType(aggOverTime) + s.aggOverOrigin = s.newAggregationOverOrigin(s.aggOverIPType) return nil } @@ -214,122 +216,124 @@ func (*statBouncer) formatMetricOrigin(origin string) string { return origin } -func (s *statBouncer) aggregateOverTime() { +func (s *statBouncer) newAggregationOverTime(rawMetrics []bouncerMetricItem) aggregationOverTime { + // aggregate over time: sum for non-gauge, last by timestamp for gauge // [bouncer][origin][name][unit][iptype]value - if s.aggregated == nil { - s.aggregated = make(map[string]map[string]map[string]map[string]map[string]int64) - } + + ret := aggregationOverTime{} // first round, we aggregate over time if the metric is not of type "gauge" - for _, raw := range s.rawMetrics { - if _, ok := s.aggregated[raw.bouncerName]; !ok { - s.aggregated[raw.bouncerName] = make(map[string]map[string]map[string]map[string]int64) + for _, raw := range rawMetrics { + if _, ok := ret[raw.bouncerName]; !ok { + ret[raw.bouncerName] = make(map[string]map[string]map[string]map[string]int64) } - if _, ok := s.aggregated[raw.bouncerName][raw.origin]; !ok { - s.aggregated[raw.bouncerName][raw.origin] = make(map[string]map[string]map[string]int64) + if _, ok := ret[raw.bouncerName][raw.origin]; !ok { + ret[raw.bouncerName][raw.origin] = make(map[string]map[string]map[string]int64) } - if _, ok := 
s.aggregated[raw.bouncerName][raw.origin][raw.name]; !ok { - s.aggregated[raw.bouncerName][raw.origin][raw.name] = make(map[string]map[string]int64) + if _, ok := ret[raw.bouncerName][raw.origin][raw.name]; !ok { + ret[raw.bouncerName][raw.origin][raw.name] = make(map[string]map[string]int64) } - if _, ok := s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit]; !ok { - s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] = make(map[string]int64) + if _, ok := ret[raw.bouncerName][raw.origin][raw.name][raw.unit]; !ok { + ret[raw.bouncerName][raw.origin][raw.name][raw.unit] = make(map[string]int64) } - if _, ok := s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit][raw.ipType]; !ok { - s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit][raw.ipType] = 0 + if _, ok := ret[raw.bouncerName][raw.origin][raw.name][raw.unit][raw.ipType]; !ok { + ret[raw.bouncerName][raw.origin][raw.name][raw.unit][raw.ipType] = 0 } if s.isGauge(raw.name) { - s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit][raw.ipType] = int64(raw.value) + ret[raw.bouncerName][raw.origin][raw.name][raw.unit][raw.ipType] = int64(raw.value) } else { - s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit][raw.ipType] += int64(raw.value) + ret[raw.bouncerName][raw.origin][raw.name][raw.unit][raw.ipType] += int64(raw.value) } } + + return ret } -func (s *statBouncer) aggregateOverIPType() { - if s.aggregatedAllIPType == nil { - s.aggregatedAllIPType = make(map[string]map[string]map[string]map[string]int64) - } +func (*statBouncer) newAggregationOverIPType(aggMetrics aggregationOverTime) aggregationOverIPType { + ret := aggregationOverIPType{} // second round, we always aggregate over ip type - for bouncerName := range s.aggregated { - if _, ok := s.aggregatedAllIPType[bouncerName]; !ok { - s.aggregatedAllIPType[bouncerName] = make(map[string]map[string]map[string]int64) + for bouncerName := range aggMetrics { + if _, ok := ret[bouncerName]; !ok { + 
ret[bouncerName] = make(map[string]map[string]map[string]int64) } - for origin := range s.aggregated[bouncerName] { - if _, ok := s.aggregatedAllIPType[bouncerName][origin]; !ok { - s.aggregatedAllIPType[bouncerName][origin] = make(map[string]map[string]int64) + for origin := range aggMetrics[bouncerName] { + if _, ok := ret[bouncerName][origin]; !ok { + ret[bouncerName][origin] = make(map[string]map[string]int64) } - for name := range s.aggregated[bouncerName][origin] { - if _, ok := s.aggregatedAllIPType[bouncerName][origin][name]; !ok { - s.aggregatedAllIPType[bouncerName][origin][name] = make(map[string]int64) + for name := range aggMetrics[bouncerName][origin] { + if _, ok := ret[bouncerName][origin][name]; !ok { + ret[bouncerName][origin][name] = make(map[string]int64) } - for unit := range s.aggregated[bouncerName][origin][name] { - if _, ok := s.aggregatedAllIPType[bouncerName][origin][name][unit]; !ok { - s.aggregatedAllIPType[bouncerName][origin][name][unit] = 0 + for unit := range aggMetrics[bouncerName][origin][name] { + if _, ok := ret[bouncerName][origin][name][unit]; !ok { + ret[bouncerName][origin][name][unit] = 0 } - for ipType := range s.aggregated[bouncerName][origin][name][unit] { - value := s.aggregated[bouncerName][origin][name][unit][ipType] - s.aggregatedAllIPType[bouncerName][origin][name][unit] += value + for ipType := range aggMetrics[bouncerName][origin][name][unit] { + value := aggMetrics[bouncerName][origin][name][unit][ipType] + ret[bouncerName][origin][name][unit] += value } } } } } + + return ret } -func (s *statBouncer) aggregateOverOrigin() { - if s.aggregatedAllOrigin == nil { - s.aggregatedAllOrigin = make(map[string]map[string]map[string]int64) - } +func (s *statBouncer) newAggregationOverOrigin(aggMetrics aggregationOverIPType) aggregationOverOrigin { + ret := aggregationOverOrigin{} // third round, we always aggregate over origin - for bouncerName := range s.aggregatedAllIPType { - if _, ok := 
s.aggregatedAllOrigin[bouncerName]; !ok { - s.aggregatedAllOrigin[bouncerName] = make(map[string]map[string]int64) + for bouncerName := range aggMetrics { + if _, ok := ret[bouncerName]; !ok { + ret[bouncerName] = make(map[string]map[string]int64) } - for origin := range s.aggregatedAllIPType[bouncerName] { - for name := range s.aggregatedAllIPType[bouncerName][origin] { - if _, ok := s.aggregatedAllOrigin[bouncerName][name]; !ok { - s.aggregatedAllOrigin[bouncerName][name] = make(map[string]int64) + for origin := range aggMetrics[bouncerName] { + for name := range aggMetrics[bouncerName][origin] { + if _, ok := ret[bouncerName][name]; !ok { + ret[bouncerName][name] = make(map[string]int64) } - for unit := range s.aggregatedAllIPType[bouncerName][origin][name] { - val := s.aggregatedAllIPType[bouncerName][origin][name][unit] - s.aggregatedAllOrigin[bouncerName][name][unit] += val + for unit := range aggMetrics[bouncerName][origin][name] { + val := aggMetrics[bouncerName][origin][name][unit] + ret[bouncerName][name][unit] += val } } } } + + return ret } // bouncerTable displays a table of metrics for a single bouncer func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor string, noUnit bool) { - columns := make(map[string]map[string]bool) + columns := make(map[string]map[string]struct{}) - for _, item := range s.rawMetrics { - if item.bouncerName != bouncerName { - continue - } + bouncerData, ok := s.aggOverOrigin[bouncerName] + if !ok { + // no metrics for this bouncer, skip. how did we get here ? 
+ // anyway we can't honor the "showEmpty" flag in this case, + // we don't even have the table headers + return + } + + for metricName, units := range bouncerData { // build a map of the metric names and units, to display dynamic columns - if _, ok := columns[item.name]; !ok { - columns[item.name] = make(map[string]bool) + columns[metricName] = make(map[string]struct{}) + for unit := range units { + columns[metricName][unit] = struct{}{} } - - columns[item.name][item.unit] = true } - // no metrics for this bouncer, skip. how did we get here ? - // anyway we can't honor the "showEmpty" flag in this case, - // we don't heven have the table headers - if len(columns) == 0 { return } @@ -376,7 +380,7 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor // sort all the ranges for stable output - for _, origin := range maptools.SortedKeys(s.aggregated[bouncerName]) { + for _, origin := range maptools.SortedKeys(s.aggOverIPType[bouncerName]) { if origin == "" { // if the metric has no origin (i.e. 
processed bytes/packets) // we don't display it in the table body but it still gets aggreagted @@ -384,7 +388,7 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor continue } - metrics := s.aggregatedAllIPType[bouncerName][origin] + metrics := s.aggOverIPType[bouncerName][origin] row := table.Row{s.formatMetricOrigin(origin)} @@ -406,7 +410,7 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor numRows += 1 } - totals := s.aggregatedAllOrigin[bouncerName] + totals := s.aggOverOrigin[bouncerName] if numRows == 0 { t.Style().Options.SeparateFooter = false @@ -441,12 +445,7 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor // Table displays a table of metrics for each bouncer func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, _ bool) { - bouncerNames := make(map[string]bool) - for _, item := range s.rawMetrics { - bouncerNames[item.bouncerName] = true - } - - for _, bouncerName := range maptools.SortedKeys(bouncerNames) { + for _, bouncerName := range maptools.SortedKeys(s.aggOverOrigin) { s.bouncerTable(out, bouncerName, wantColor, noUnit) } } From 644877703489f70f1e56a96ff79e69ffd45987cb Mon Sep 17 00:00:00 2001 From: marco Date: Tue, 23 Jul 2024 17:08:39 +0200 Subject: [PATCH 113/119] unroll map creation, extract more methods --- cmd/crowdsec-cli/climetrics/statbouncer.go | 124 ++++++++++++--------- 1 file changed, 71 insertions(+), 53 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index fa63900ba04..380cf852929 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -34,9 +34,76 @@ type bouncerMetricItem struct { } type aggregationOverTime map[string]map[string]map[string]map[string]map[string]int64 + +func (a aggregationOverTime) add(bouncerName, origin, name, unit, ipType string, value float64, isGauge bool) { + if _, ok := 
a[bouncerName]; !ok { + a[bouncerName] = make(map[string]map[string]map[string]map[string]int64) + } + + if _, ok := a[bouncerName][origin]; !ok { + a[bouncerName][origin] = make(map[string]map[string]map[string]int64) + } + + if _, ok := a[bouncerName][origin][name]; !ok { + a[bouncerName][origin][name] = make(map[string]map[string]int64) + } + + if _, ok := a[bouncerName][origin][name][unit]; !ok { + a[bouncerName][origin][name][unit] = make(map[string]int64) + } + + if _, ok := a[bouncerName][origin][name][unit][ipType]; !ok { + a[bouncerName][origin][name][unit][ipType] = 0 + } + + if isGauge { + a[bouncerName][origin][name][unit][ipType] = int64(value) + } else { + a[bouncerName][origin][name][unit][ipType] += int64(value) + } +} + + type aggregationOverIPType map[string]map[string]map[string]map[string]int64 + +func (a aggregationOverIPType) add(bouncerName, origin, name, unit string, value int64) { + if _, ok := a[bouncerName]; !ok { + a[bouncerName] = make(map[string]map[string]map[string]int64) + } + + if _, ok := a[bouncerName][origin]; !ok { + a[bouncerName][origin] = make(map[string]map[string]int64) + } + + if _, ok := a[bouncerName][origin][name]; !ok { + a[bouncerName][origin][name] = make(map[string]int64) + } + + if _, ok := a[bouncerName][origin][name][unit]; !ok { + a[bouncerName][origin][name][unit] = 0 + } + + a[bouncerName][origin][name][unit] += value +} + type aggregationOverOrigin map[string]map[string]map[string]int64 +func (a aggregationOverOrigin) add(bouncerName, name, unit string, value int64) { + if _, ok := a[bouncerName]; !ok { + a[bouncerName] = make(map[string]map[string]int64) + } + + if _, ok := a[bouncerName][name]; !ok { + a[bouncerName][name] = make(map[string]int64) + } + + if _, ok := a[bouncerName][name][unit]; !ok { + a[bouncerName][name][unit] = 0 + } + + a[bouncerName][name][unit] += value +} + type statBouncer struct { // oldest collection timestamp for each bouncer oldestTS map[string]time.Time @@ -222,34 +289,8 @@ 
func (s *statBouncer) newAggregationOverTime(rawMetrics []bouncerMetricItem) agg ret := aggregationOverTime{} - // first round, we aggregate over time if the metric is not of type "gauge" - for _, raw := range rawMetrics { - if _, ok := ret[raw.bouncerName]; !ok { - ret[raw.bouncerName] = make(map[string]map[string]map[string]map[string]int64) - } - - if _, ok := ret[raw.bouncerName][raw.origin]; !ok { - ret[raw.bouncerName][raw.origin] = make(map[string]map[string]map[string]int64) - } - - if _, ok := ret[raw.bouncerName][raw.origin][raw.name]; !ok { - ret[raw.bouncerName][raw.origin][raw.name] = make(map[string]map[string]int64) - } - - if _, ok := ret[raw.bouncerName][raw.origin][raw.name][raw.unit]; !ok { - ret[raw.bouncerName][raw.origin][raw.name][raw.unit] = make(map[string]int64) - } - - if _, ok := ret[raw.bouncerName][raw.origin][raw.name][raw.unit][raw.ipType]; !ok { - ret[raw.bouncerName][raw.origin][raw.name][raw.unit][raw.ipType] = 0 - } - - if s.isGauge(raw.name) { - ret[raw.bouncerName][raw.origin][raw.name][raw.unit][raw.ipType] = int64(raw.value) - } else { - ret[raw.bouncerName][raw.origin][raw.name][raw.unit][raw.ipType] += int64(raw.value) - } + ret.add(raw.bouncerName, raw.origin, raw.name, raw.unit, raw.ipType, raw.value, s.isGauge(raw.name)) } return ret @@ -258,28 +299,13 @@ func (s *statBouncer) newAggregationOverTime(rawMetrics []bouncerMetricItem) agg func (*statBouncer) newAggregationOverIPType(aggMetrics aggregationOverTime) aggregationOverIPType { ret := aggregationOverIPType{} - // second round, we always aggregate over ip type - for bouncerName := range aggMetrics { - if _, ok := ret[bouncerName]; !ok { - ret[bouncerName] = make(map[string]map[string]map[string]int64) - } for origin := range aggMetrics[bouncerName] { - if _, ok := ret[bouncerName][origin]; !ok { - ret[bouncerName][origin] = make(map[string]map[string]int64) - } for name := range aggMetrics[bouncerName][origin] { - if _, ok := ret[bouncerName][origin][name]; !ok { - 
ret[bouncerName][origin][name] = make(map[string]int64) - } for unit := range aggMetrics[bouncerName][origin][name] { - if _, ok := ret[bouncerName][origin][name][unit]; !ok { - ret[bouncerName][origin][name][unit] = 0 - } - for ipType := range aggMetrics[bouncerName][origin][name][unit] { value := aggMetrics[bouncerName][origin][name][unit][ipType] - ret[bouncerName][origin][name][unit] += value + ret.add(bouncerName, origin, name, unit, value) } } } @@ -289,23 +315,15 @@ func (*statBouncer) newAggregationOverIPType(aggMetrics aggregationOverTime) agg return ret } -func (s *statBouncer) newAggregationOverOrigin(aggMetrics aggregationOverIPType) aggregationOverOrigin { +func (*statBouncer) newAggregationOverOrigin(aggMetrics aggregationOverIPType) aggregationOverOrigin { ret := aggregationOverOrigin{} - // third round, we always aggregate over origin - for bouncerName := range aggMetrics { - if _, ok := ret[bouncerName]; !ok { - ret[bouncerName] = make(map[string]map[string]int64) - } for origin := range aggMetrics[bouncerName] { for name := range aggMetrics[bouncerName][origin] { - if _, ok := ret[bouncerName][name]; !ok { - ret[bouncerName][name] = make(map[string]int64) - } for unit := range aggMetrics[bouncerName][origin][name] { val := aggMetrics[bouncerName][origin][name][unit] - ret[bouncerName][name][unit] += val + ret.add(bouncerName, name, unit, val) } } } From 9ad7a73d1e5793b4838c2a7b2c7a1934a005b70d Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Tue, 23 Jul 2024 17:17:05 +0200 Subject: [PATCH 114/119] do not try to get console options for LP --- cmd/crowdsec/crowdsec.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 1de885027e0..5aafc6b0dfe 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -144,7 +144,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H apiClient, lpMetricsDefaultInterval, log.WithField("service", 
"lpmetrics"), - cConfig.API.Server.ConsoleConfig.EnabledOptions(), + []string{}, datasources, hub, ) From d7185a9ee3658f56f21690c0d0026dd9fd641b24 Mon Sep 17 00:00:00 2001 From: marco Date: Tue, 23 Jul 2024 17:28:21 +0200 Subject: [PATCH 115/119] commends and dry --- cmd/crowdsec-cli/climetrics/statbouncer.go | 25 ++++++++++------------ 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 380cf852929..87934344393 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -22,7 +22,8 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) -// un-aggregated data, de-normalized. +// bouncerMetricItem represents unaggregated, denormalized metric data. +// Possibly not unique if a bouncer sent the same data multiple times. type bouncerMetricItem struct { collectedAt time.Time bouncerName string @@ -33,6 +34,9 @@ type bouncerMetricItem struct { value float64 } +// aggregationOverTime is the first level of aggregation: we aggregate +// over time, then over ip type, then over origin. we only sum values +// for non-gauge metrics, and take the last value for gauge metrics. 
type aggregationOverTime map[string]map[string]map[string]map[string]map[string]int64 func (a aggregationOverTime) add(bouncerName, origin, name, unit, ipType string, value float64, isGauge bool) { @@ -52,10 +56,6 @@ func (a aggregationOverTime) add(bouncerName, origin, name, unit, ipType string, a[bouncerName][origin][name][unit] = make(map[string]int64) } - if _, ok := a[bouncerName][origin][name][unit][ipType]; !ok { - a[bouncerName][origin][name][unit][ipType] = 0 - } - if isGauge { a[bouncerName][origin][name][unit][ipType] = int64(value) } else { @@ -63,7 +63,9 @@ func (a aggregationOverTime) add(bouncerName, origin, name, unit, ipType string, } } - +// aggregationOverIPType is the second level of aggregation: data is summed +// regardless of the metrics type (gauge or not). This is used to display +// table rows, they won't differentiate ipv4 and ipv6 type aggregationOverIPType map[string]map[string]map[string]map[string]int64 func (a aggregationOverIPType) add(bouncerName, origin, name, unit string, value int64) { @@ -79,13 +81,12 @@ func (a aggregationOverIPType) add(bouncerName, origin, name, unit string, value a[bouncerName][origin][name] = make(map[string]int64) } - if _, ok := a[bouncerName][origin][name][unit]; !ok { - a[bouncerName][origin][name][unit] = 0 - } - a[bouncerName][origin][name][unit] += value } +// aggregationOverOrigin is the third level of aggregation: these are +// the totals at the end of the table. Metrics without an origin will +// be added to the totals but not displayed in the rows, only in the footer. 
type aggregationOverOrigin map[string]map[string]map[string]int64 func (a aggregationOverOrigin) add(bouncerName, name, unit string, value int64) { @@ -97,10 +98,6 @@ func (a aggregationOverOrigin) add(bouncerName, name, unit string, value int64) a[bouncerName][name] = make(map[string]int64) } - if _, ok := a[bouncerName][name][unit]; !ok { - a[bouncerName][name][unit] = 0 - } - a[bouncerName][name][unit] += value } From d5444b0e20549ccd417f1849c8e59f8cbeff562b Mon Sep 17 00:00:00 2001 From: Sebastien Blot Date: Tue, 23 Jul 2024 17:41:19 +0200 Subject: [PATCH 116/119] default value for received_at --- pkg/database/ent/metric.go | 11 +++++++---- pkg/database/ent/metric/metric.go | 6 ++++++ pkg/database/ent/metric/where.go | 10 ++++++++++ pkg/database/ent/metric_create.go | 23 +++++++++++++++++++---- pkg/database/ent/metric_update.go | 6 ++++++ pkg/database/ent/migrate/schema.go | 2 +- pkg/database/ent/mutation.go | 21 ++++++++++++++++++++- pkg/database/ent/runtime.go | 7 +++++++ pkg/database/ent/schema/metric.go | 4 ++++ 9 files changed, 80 insertions(+), 10 deletions(-) diff --git a/pkg/database/ent/metric.go b/pkg/database/ent/metric.go index 47f3b4df4e5..7a4552187c5 100644 --- a/pkg/database/ent/metric.go +++ b/pkg/database/ent/metric.go @@ -23,7 +23,7 @@ type Metric struct { // It must come from the auth middleware. 
GeneratedBy string `json:"generated_by,omitempty"` // When the metrics are received by LAPI - ReceivedAt time.Time `json:"received_at,omitempty"` + ReceivedAt *time.Time `json:"received_at,omitempty"` // When the metrics are sent to the console PushedAt *time.Time `json:"pushed_at,omitempty"` // The actual metrics (item0) @@ -79,7 +79,8 @@ func (m *Metric) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field received_at", values[i]) } else if value.Valid { - m.ReceivedAt = value.Time + m.ReceivedAt = new(time.Time) + *m.ReceivedAt = value.Time } case metric.FieldPushedAt: if value, ok := values[i].(*sql.NullTime); !ok { @@ -136,8 +137,10 @@ func (m *Metric) String() string { builder.WriteString("generated_by=") builder.WriteString(m.GeneratedBy) builder.WriteString(", ") - builder.WriteString("received_at=") - builder.WriteString(m.ReceivedAt.Format(time.ANSIC)) + if v := m.ReceivedAt; v != nil { + builder.WriteString("received_at=") + builder.WriteString(v.Format(time.ANSIC)) + } builder.WriteString(", ") if v := m.PushedAt; v != nil { builder.WriteString("pushed_at=") diff --git a/pkg/database/ent/metric/metric.go b/pkg/database/ent/metric/metric.go index 78e88982220..1e46081e479 100644 --- a/pkg/database/ent/metric/metric.go +++ b/pkg/database/ent/metric/metric.go @@ -4,6 +4,7 @@ package metric import ( "fmt" + "time" "entgo.io/ent/dialect/sql" ) @@ -47,6 +48,11 @@ func ValidColumn(column string) bool { return false } +var ( + // DefaultReceivedAt holds the default value on creation for the "received_at" field. + DefaultReceivedAt func() time.Time +) + // GeneratedType defines the type for the "generated_type" enum field. 
type GeneratedType string diff --git a/pkg/database/ent/metric/where.go b/pkg/database/ent/metric/where.go index 72bd9d93cd7..9f4a7f567a8 100644 --- a/pkg/database/ent/metric/where.go +++ b/pkg/database/ent/metric/where.go @@ -199,6 +199,16 @@ func ReceivedAtLTE(v time.Time) predicate.Metric { return predicate.Metric(sql.FieldLTE(FieldReceivedAt, v)) } +// ReceivedAtIsNil applies the IsNil predicate on the "received_at" field. +func ReceivedAtIsNil() predicate.Metric { + return predicate.Metric(sql.FieldIsNull(FieldReceivedAt)) +} + +// ReceivedAtNotNil applies the NotNil predicate on the "received_at" field. +func ReceivedAtNotNil() predicate.Metric { + return predicate.Metric(sql.FieldNotNull(FieldReceivedAt)) +} + // PushedAtEQ applies the EQ predicate on the "pushed_at" field. func PushedAtEQ(v time.Time) predicate.Metric { return predicate.Metric(sql.FieldEQ(FieldPushedAt, v)) diff --git a/pkg/database/ent/metric_create.go b/pkg/database/ent/metric_create.go index 973cddd41d0..22935d8ae2f 100644 --- a/pkg/database/ent/metric_create.go +++ b/pkg/database/ent/metric_create.go @@ -38,6 +38,14 @@ func (mc *MetricCreate) SetReceivedAt(t time.Time) *MetricCreate { return mc } +// SetNillableReceivedAt sets the "received_at" field if the given value is not nil. +func (mc *MetricCreate) SetNillableReceivedAt(t *time.Time) *MetricCreate { + if t != nil { + mc.SetReceivedAt(*t) + } + return mc +} + // SetPushedAt sets the "pushed_at" field. func (mc *MetricCreate) SetPushedAt(t time.Time) *MetricCreate { mc.mutation.SetPushedAt(t) @@ -65,6 +73,7 @@ func (mc *MetricCreate) Mutation() *MetricMutation { // Save creates the Metric in the database. func (mc *MetricCreate) Save(ctx context.Context) (*Metric, error) { + mc.defaults() return withHooks(ctx, mc.sqlSave, mc.mutation, mc.hooks) } @@ -90,6 +99,14 @@ func (mc *MetricCreate) ExecX(ctx context.Context) { } } +// defaults sets the default values of the builder before save. 
+func (mc *MetricCreate) defaults() { + if _, ok := mc.mutation.ReceivedAt(); !ok { + v := metric.DefaultReceivedAt() + mc.mutation.SetReceivedAt(v) + } +} + // check runs all checks and user-defined validators on the builder. func (mc *MetricCreate) check() error { if _, ok := mc.mutation.GeneratedType(); !ok { @@ -103,9 +120,6 @@ func (mc *MetricCreate) check() error { if _, ok := mc.mutation.GeneratedBy(); !ok { return &ValidationError{Name: "generated_by", err: errors.New(`ent: missing required field "Metric.generated_by"`)} } - if _, ok := mc.mutation.ReceivedAt(); !ok { - return &ValidationError{Name: "received_at", err: errors.New(`ent: missing required field "Metric.received_at"`)} - } if _, ok := mc.mutation.Payload(); !ok { return &ValidationError{Name: "payload", err: errors.New(`ent: missing required field "Metric.payload"`)} } @@ -145,7 +159,7 @@ func (mc *MetricCreate) createSpec() (*Metric, *sqlgraph.CreateSpec) { } if value, ok := mc.mutation.ReceivedAt(); ok { _spec.SetField(metric.FieldReceivedAt, field.TypeTime, value) - _node.ReceivedAt = value + _node.ReceivedAt = &value } if value, ok := mc.mutation.PushedAt(); ok { _spec.SetField(metric.FieldPushedAt, field.TypeTime, value) @@ -176,6 +190,7 @@ func (mcb *MetricCreateBulk) Save(ctx context.Context) ([]*Metric, error) { for i := range mcb.builders { func(i int, root context.Context) { builder := mcb.builders[i] + builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*MetricMutation) if !ok { diff --git a/pkg/database/ent/metric_update.go b/pkg/database/ent/metric_update.go index 4da33dd6ce9..1a9b216bee9 100644 --- a/pkg/database/ent/metric_update.go +++ b/pkg/database/ent/metric_update.go @@ -89,6 +89,9 @@ func (mu *MetricUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } + if mu.mutation.ReceivedAtCleared() { + _spec.ClearField(metric.FieldReceivedAt, field.TypeTime) + } if value, ok := mu.mutation.PushedAt(); ok 
{ _spec.SetField(metric.FieldPushedAt, field.TypeTime, value) } @@ -206,6 +209,9 @@ func (muo *MetricUpdateOne) sqlSave(ctx context.Context) (_node *Metric, err err } } } + if muo.mutation.ReceivedAtCleared() { + _spec.ClearField(metric.FieldReceivedAt, field.TypeTime) + } if value, ok := muo.mutation.PushedAt(); ok { _spec.SetField(metric.FieldPushedAt, field.TypeTime, value) } diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index 986f5bc8c67..cc101fd007b 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -254,7 +254,7 @@ var ( {Name: "id", Type: field.TypeInt, Increment: true}, {Name: "generated_type", Type: field.TypeEnum, Enums: []string{"LP", "RC"}}, {Name: "generated_by", Type: field.TypeString}, - {Name: "received_at", Type: field.TypeTime}, + {Name: "received_at", Type: field.TypeTime, Nullable: true}, {Name: "pushed_at", Type: field.TypeTime, Nullable: true}, {Name: "payload", Type: field.TypeString, Size: 2147483647}, } diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index 5c6596f3db4..e20798aa621 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -8836,7 +8836,7 @@ func (m *MetricMutation) ReceivedAt() (r time.Time, exists bool) { // OldReceivedAt returns the old "received_at" field's value of the Metric entity. // If the Metric object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *MetricMutation) OldReceivedAt(ctx context.Context) (v time.Time, err error) { +func (m *MetricMutation) OldReceivedAt(ctx context.Context) (v *time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldReceivedAt is only allowed on UpdateOne operations") } @@ -8850,9 +8850,22 @@ func (m *MetricMutation) OldReceivedAt(ctx context.Context) (v time.Time, err er return oldValue.ReceivedAt, nil } +// ClearReceivedAt clears the value of the "received_at" field. +func (m *MetricMutation) ClearReceivedAt() { + m.received_at = nil + m.clearedFields[metric.FieldReceivedAt] = struct{}{} +} + +// ReceivedAtCleared returns if the "received_at" field was cleared in this mutation. +func (m *MetricMutation) ReceivedAtCleared() bool { + _, ok := m.clearedFields[metric.FieldReceivedAt] + return ok +} + // ResetReceivedAt resets all changes to the "received_at" field. func (m *MetricMutation) ResetReceivedAt() { m.received_at = nil + delete(m.clearedFields, metric.FieldReceivedAt) } // SetPushedAt sets the "pushed_at" field. @@ -9101,6 +9114,9 @@ func (m *MetricMutation) AddField(name string, value ent.Value) error { // mutation. func (m *MetricMutation) ClearedFields() []string { var fields []string + if m.FieldCleared(metric.FieldReceivedAt) { + fields = append(fields, metric.FieldReceivedAt) + } if m.FieldCleared(metric.FieldPushedAt) { fields = append(fields, metric.FieldPushedAt) } @@ -9118,6 +9134,9 @@ func (m *MetricMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. 
func (m *MetricMutation) ClearField(name string) error { switch name { + case metric.FieldReceivedAt: + m.ClearReceivedAt() + return nil case metric.FieldPushedAt: m.ClearPushedAt() return nil diff --git a/pkg/database/ent/runtime.go b/pkg/database/ent/runtime.go index 15413490633..569c043503a 100644 --- a/pkg/database/ent/runtime.go +++ b/pkg/database/ent/runtime.go @@ -13,6 +13,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" "github.com/crowdsecurity/crowdsec/pkg/database/ent/schema" ) @@ -170,4 +171,10 @@ func init() { metaDescValue := metaFields[3].Descriptor() // meta.ValueValidator is a validator for the "value" field. It is called by the builders before save. meta.ValueValidator = metaDescValue.Validators[0].(func(string) error) + metricFields := schema.Metric{}.Fields() + _ = metricFields + // metricDescReceivedAt is the schema descriptor for received_at field. + metricDescReceivedAt := metricFields[2].Descriptor() + // metric.DefaultReceivedAt holds the default value on creation for the received_at field. + metric.DefaultReceivedAt = metricDescReceivedAt.Default.(func() time.Time) } diff --git a/pkg/database/ent/schema/metric.go b/pkg/database/ent/schema/metric.go index 319c67b7aa7..d02a7685356 100644 --- a/pkg/database/ent/schema/metric.go +++ b/pkg/database/ent/schema/metric.go @@ -3,6 +3,7 @@ package schema import ( "entgo.io/ent" "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/types" ) // Metric is actually a set of metrics collected by a device @@ -22,6 +23,9 @@ func (Metric) Fields() []ent.Field { Comment("Source of the metrics: machine id, bouncer name...\nIt must come from the auth middleware."), field.Time("received_at"). Immutable(). + Default(types.UtcNow). + Nillable(). + Optional(). 
Comment("When the metrics are received by LAPI"), field.Time("pushed_at"). Nillable(). From aeb3f080efcfa135e76b8636761e242c657ecacd Mon Sep 17 00:00:00 2001 From: marco Date: Tue, 23 Jul 2024 23:11:29 +0200 Subject: [PATCH 117/119] lin/style --- cmd/crowdsec-cli/climetrics/statbouncer.go | 44 ++++++++-------------- 1 file changed, 16 insertions(+), 28 deletions(-) diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 87934344393..7d80e902961 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -138,7 +138,7 @@ func logWarningOnce(warningsLogged map[string]bool, msg string) { // extractRawMetrics converts metrics from the database to a de-normalized, de-duplicated slice // it returns the slice and the oldest timestamp for each bouncer func (*statBouncer) extractRawMetrics(metrics []*ent.Metric) ([]bouncerMetricItem, map[string]time.Time) { - oldestTS := make(map[string]time.Time, 0) + oldestTS := make(map[string]time.Time) // don't spam the user with the same warnings warningsLogged := make(map[string]bool) @@ -149,14 +149,11 @@ func (*statBouncer) extractRawMetrics(metrics []*ent.Metric) ([]bouncerMetricIte for _, met := range metrics { bouncerName := met.GeneratedBy - type bouncerMetrics struct { + var payload struct { Metrics []models.DetailedMetrics `json:"metrics"` } - payload := bouncerMetrics{} - - err := json.Unmarshal([]byte(met.Payload), &payload) - if err != nil { + if err := json.Unmarshal([]byte(met.Payload), &payload); err != nil { log.Warningf("while parsing metrics for %s: %s", bouncerName, err) continue } @@ -175,8 +172,6 @@ func (*statBouncer) extractRawMetrics(metrics []*ent.Metric) ([]bouncerMetricIte } for _, item := range m.Items { - labels := item.Labels - valid := true if item.Name == nil { @@ -202,8 +197,8 @@ func (*statBouncer) extractRawMetrics(metrics []*ent.Metric) ([]bouncerMetricIte rawMetric := bouncerMetricItem{ collectedAt: 
collectedAt, bouncerName: bouncerName, - ipType: labels["ip_type"], - origin: labels["origin"], + ipType: item.Labels["ip_type"], + origin: item.Labels["origin"], name: *item.Name, unit: *item.Unit, value: *item.Value, @@ -236,9 +231,7 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { // query all bouncer metrics that have not been flushed metrics, err := db.Ent.Metric.Query(). - Where( - metric.GeneratedTypeEQ(metric.GeneratedTypeRC), - ). + Where(metric.GeneratedTypeEQ(metric.GeneratedTypeRC)). All(ctx) if err != nil { return fmt.Errorf("unable to fetch metrics: %w", err) @@ -271,19 +264,17 @@ func (*statBouncer) formatMetricName(name string) string { func (*statBouncer) formatMetricOrigin(origin string) string { switch origin { case "CAPI": - origin += " (community blocklist)" + return origin + " (community blocklist)" case "cscli": - origin += " (manual decisions)" + return origin + " (manual decisions)" case "crowdsec": - origin += " (security engine)" + return origin + " (security engine)" + default: + return origin } - return origin } func (s *statBouncer) newAggregationOverTime(rawMetrics []bouncerMetricItem) aggregationOverTime { - // aggregate over time: sum for non-gauge, last by timestamp for gauge - // [bouncer][origin][name][unit][iptype]value - ret := aggregationOverTime{} for _, raw := range rawMetrics { @@ -372,8 +363,8 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor header1 = append(header1, s.formatMetricName(name)) // we don't add "s" to random words - if knownPlurals[unit] != "" { - unit = knownPlurals[unit] + if plural, ok := knownPlurals[unit]; ok { + unit = plural } header2 = append(header2, unit) @@ -411,8 +402,7 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor for _, unit := range maptools.SortedKeys(columns[name]) { valStr := "-" - val, ok := metrics[name][unit] - if ok { + if val, ok := metrics[name][unit]; ok { valStr = formatNumber(val, 
!noUnit) } @@ -449,11 +439,9 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor title = fmt.Sprintf("%s since %s", title, s.oldestTS[bouncerName].String()) } - title += ":" - // don't use SetTitle() because it draws the title inside table box - io.WriteString(out, title+"\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") // empty line between tables io.WriteString(out, "\n") } From e55747a64f8d0c685df22aa37753887d3587ca95 Mon Sep 17 00:00:00 2001 From: marco Date: Tue, 23 Jul 2024 23:56:39 +0200 Subject: [PATCH 118/119] mod tidy --- go.mod | 3 +-- go.sum | 2 -- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/go.mod b/go.mod index f36bbcd996d..af9d7550b94 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,6 @@ require ( github.com/Masterminds/sprig/v3 v3.2.3 github.com/agext/levenshtein v1.2.3 github.com/alexliesenfeld/health v0.8.0 - github.com/antonmedv/expr v1.15.3 github.com/appleboy/gin-jwt/v2 v2.9.2 github.com/aws/aws-lambda-go v1.47.0 github.com/aws/aws-sdk-go v1.52.0 @@ -33,6 +32,7 @@ require ( github.com/dghubble/sling v1.4.2 github.com/docker/docker v24.0.9+incompatible github.com/docker/go-connections v0.4.0 + github.com/expr-lang/expr v1.16.9 github.com/fatih/color v1.16.0 github.com/fsnotify/fsnotify v1.7.0 github.com/gin-gonic/gin v1.9.1 @@ -111,7 +111,6 @@ require ( github.com/creack/pty v1.1.18 // indirect github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/expr-lang/expr v1.16.9 // indirect github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/gin-contrib/sse v0.1.0 // indirect github.com/go-logr/logr v1.2.4 // indirect diff --git a/go.sum b/go.sum index d4cc2651f0f..282f10d6367 100644 --- a/go.sum +++ b/go.sum @@ -39,8 +39,6 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alexliesenfeld/health v0.8.0 
h1:lCV0i+ZJPTbqP7LfKG7p3qZBl5VhelwUFCIVWl77fgk= github.com/alexliesenfeld/health v0.8.0/go.mod h1:TfNP0f+9WQVWMQRzvMUjlws4ceXKEL3WR+6Hp95HUFc= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/antonmedv/expr v1.15.3 h1:q3hOJZNvLvhqE8OHBs1cFRdbXFNKuA+bHmRaI+AmRmI= -github.com/antonmedv/expr v1.15.3/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= github.com/appleboy/gin-jwt/v2 v2.9.2 h1:GeS3lm9mb9HMmj7+GNjYUtpp3V1DAQ1TkUFa5poiZ7Y= From 01479b82ab64c2fab69a0b916a5c8a8bcaa8831d Mon Sep 17 00:00:00 2001 From: marco Date: Wed, 24 Jul 2024 10:11:36 +0200 Subject: [PATCH 119/119] Revert "default value for received_at" This reverts commit d5444b0e20549ccd417f1849c8e59f8cbeff562b. --- pkg/database/ent/metric.go | 11 ++++------- pkg/database/ent/metric/metric.go | 6 ------ pkg/database/ent/metric/where.go | 10 ---------- pkg/database/ent/metric_create.go | 23 ++++------------------- pkg/database/ent/metric_update.go | 6 ------ pkg/database/ent/migrate/schema.go | 2 +- pkg/database/ent/mutation.go | 21 +-------------------- pkg/database/ent/runtime.go | 7 ------- pkg/database/ent/schema/metric.go | 4 ---- 9 files changed, 10 insertions(+), 80 deletions(-) diff --git a/pkg/database/ent/metric.go b/pkg/database/ent/metric.go index 7a4552187c5..47f3b4df4e5 100644 --- a/pkg/database/ent/metric.go +++ b/pkg/database/ent/metric.go @@ -23,7 +23,7 @@ type Metric struct { // It must come from the auth middleware. 
GeneratedBy string `json:"generated_by,omitempty"` // When the metrics are received by LAPI - ReceivedAt *time.Time `json:"received_at,omitempty"` + ReceivedAt time.Time `json:"received_at,omitempty"` // When the metrics are sent to the console PushedAt *time.Time `json:"pushed_at,omitempty"` // The actual metrics (item0) @@ -79,8 +79,7 @@ func (m *Metric) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field received_at", values[i]) } else if value.Valid { - m.ReceivedAt = new(time.Time) - *m.ReceivedAt = value.Time + m.ReceivedAt = value.Time } case metric.FieldPushedAt: if value, ok := values[i].(*sql.NullTime); !ok { @@ -137,10 +136,8 @@ func (m *Metric) String() string { builder.WriteString("generated_by=") builder.WriteString(m.GeneratedBy) builder.WriteString(", ") - if v := m.ReceivedAt; v != nil { - builder.WriteString("received_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("received_at=") + builder.WriteString(m.ReceivedAt.Format(time.ANSIC)) builder.WriteString(", ") if v := m.PushedAt; v != nil { builder.WriteString("pushed_at=") diff --git a/pkg/database/ent/metric/metric.go b/pkg/database/ent/metric/metric.go index 1e46081e479..78e88982220 100644 --- a/pkg/database/ent/metric/metric.go +++ b/pkg/database/ent/metric/metric.go @@ -4,7 +4,6 @@ package metric import ( "fmt" - "time" "entgo.io/ent/dialect/sql" ) @@ -48,11 +47,6 @@ func ValidColumn(column string) bool { return false } -var ( - // DefaultReceivedAt holds the default value on creation for the "received_at" field. - DefaultReceivedAt func() time.Time -) - // GeneratedType defines the type for the "generated_type" enum field. 
type GeneratedType string diff --git a/pkg/database/ent/metric/where.go b/pkg/database/ent/metric/where.go index 9f4a7f567a8..72bd9d93cd7 100644 --- a/pkg/database/ent/metric/where.go +++ b/pkg/database/ent/metric/where.go @@ -199,16 +199,6 @@ func ReceivedAtLTE(v time.Time) predicate.Metric { return predicate.Metric(sql.FieldLTE(FieldReceivedAt, v)) } -// ReceivedAtIsNil applies the IsNil predicate on the "received_at" field. -func ReceivedAtIsNil() predicate.Metric { - return predicate.Metric(sql.FieldIsNull(FieldReceivedAt)) -} - -// ReceivedAtNotNil applies the NotNil predicate on the "received_at" field. -func ReceivedAtNotNil() predicate.Metric { - return predicate.Metric(sql.FieldNotNull(FieldReceivedAt)) -} - // PushedAtEQ applies the EQ predicate on the "pushed_at" field. func PushedAtEQ(v time.Time) predicate.Metric { return predicate.Metric(sql.FieldEQ(FieldPushedAt, v)) diff --git a/pkg/database/ent/metric_create.go b/pkg/database/ent/metric_create.go index 22935d8ae2f..973cddd41d0 100644 --- a/pkg/database/ent/metric_create.go +++ b/pkg/database/ent/metric_create.go @@ -38,14 +38,6 @@ func (mc *MetricCreate) SetReceivedAt(t time.Time) *MetricCreate { return mc } -// SetNillableReceivedAt sets the "received_at" field if the given value is not nil. -func (mc *MetricCreate) SetNillableReceivedAt(t *time.Time) *MetricCreate { - if t != nil { - mc.SetReceivedAt(*t) - } - return mc -} - // SetPushedAt sets the "pushed_at" field. func (mc *MetricCreate) SetPushedAt(t time.Time) *MetricCreate { mc.mutation.SetPushedAt(t) @@ -73,7 +65,6 @@ func (mc *MetricCreate) Mutation() *MetricMutation { // Save creates the Metric in the database. func (mc *MetricCreate) Save(ctx context.Context) (*Metric, error) { - mc.defaults() return withHooks(ctx, mc.sqlSave, mc.mutation, mc.hooks) } @@ -99,14 +90,6 @@ func (mc *MetricCreate) ExecX(ctx context.Context) { } } -// defaults sets the default values of the builder before save. 
-func (mc *MetricCreate) defaults() { - if _, ok := mc.mutation.ReceivedAt(); !ok { - v := metric.DefaultReceivedAt() - mc.mutation.SetReceivedAt(v) - } -} - // check runs all checks and user-defined validators on the builder. func (mc *MetricCreate) check() error { if _, ok := mc.mutation.GeneratedType(); !ok { @@ -120,6 +103,9 @@ func (mc *MetricCreate) check() error { if _, ok := mc.mutation.GeneratedBy(); !ok { return &ValidationError{Name: "generated_by", err: errors.New(`ent: missing required field "Metric.generated_by"`)} } + if _, ok := mc.mutation.ReceivedAt(); !ok { + return &ValidationError{Name: "received_at", err: errors.New(`ent: missing required field "Metric.received_at"`)} + } if _, ok := mc.mutation.Payload(); !ok { return &ValidationError{Name: "payload", err: errors.New(`ent: missing required field "Metric.payload"`)} } @@ -159,7 +145,7 @@ func (mc *MetricCreate) createSpec() (*Metric, *sqlgraph.CreateSpec) { } if value, ok := mc.mutation.ReceivedAt(); ok { _spec.SetField(metric.FieldReceivedAt, field.TypeTime, value) - _node.ReceivedAt = &value + _node.ReceivedAt = value } if value, ok := mc.mutation.PushedAt(); ok { _spec.SetField(metric.FieldPushedAt, field.TypeTime, value) @@ -190,7 +176,6 @@ func (mcb *MetricCreateBulk) Save(ctx context.Context) ([]*Metric, error) { for i := range mcb.builders { func(i int, root context.Context) { builder := mcb.builders[i] - builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*MetricMutation) if !ok { diff --git a/pkg/database/ent/metric_update.go b/pkg/database/ent/metric_update.go index 1a9b216bee9..4da33dd6ce9 100644 --- a/pkg/database/ent/metric_update.go +++ b/pkg/database/ent/metric_update.go @@ -89,9 +89,6 @@ func (mu *MetricUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if mu.mutation.ReceivedAtCleared() { - _spec.ClearField(metric.FieldReceivedAt, field.TypeTime) - } if value, ok := mu.mutation.PushedAt(); ok 
{ _spec.SetField(metric.FieldPushedAt, field.TypeTime, value) } @@ -209,9 +206,6 @@ func (muo *MetricUpdateOne) sqlSave(ctx context.Context) (_node *Metric, err err } } } - if muo.mutation.ReceivedAtCleared() { - _spec.ClearField(metric.FieldReceivedAt, field.TypeTime) - } if value, ok := muo.mutation.PushedAt(); ok { _spec.SetField(metric.FieldPushedAt, field.TypeTime, value) } diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index cc101fd007b..986f5bc8c67 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -254,7 +254,7 @@ var ( {Name: "id", Type: field.TypeInt, Increment: true}, {Name: "generated_type", Type: field.TypeEnum, Enums: []string{"LP", "RC"}}, {Name: "generated_by", Type: field.TypeString}, - {Name: "received_at", Type: field.TypeTime, Nullable: true}, + {Name: "received_at", Type: field.TypeTime}, {Name: "pushed_at", Type: field.TypeTime, Nullable: true}, {Name: "payload", Type: field.TypeString, Size: 2147483647}, } diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index e20798aa621..5c6596f3db4 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -8836,7 +8836,7 @@ func (m *MetricMutation) ReceivedAt() (r time.Time, exists bool) { // OldReceivedAt returns the old "received_at" field's value of the Metric entity. // If the Metric object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *MetricMutation) OldReceivedAt(ctx context.Context) (v *time.Time, err error) { +func (m *MetricMutation) OldReceivedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldReceivedAt is only allowed on UpdateOne operations") } @@ -8850,22 +8850,9 @@ func (m *MetricMutation) OldReceivedAt(ctx context.Context) (v *time.Time, err e return oldValue.ReceivedAt, nil } -// ClearReceivedAt clears the value of the "received_at" field. -func (m *MetricMutation) ClearReceivedAt() { - m.received_at = nil - m.clearedFields[metric.FieldReceivedAt] = struct{}{} -} - -// ReceivedAtCleared returns if the "received_at" field was cleared in this mutation. -func (m *MetricMutation) ReceivedAtCleared() bool { - _, ok := m.clearedFields[metric.FieldReceivedAt] - return ok -} - // ResetReceivedAt resets all changes to the "received_at" field. func (m *MetricMutation) ResetReceivedAt() { m.received_at = nil - delete(m.clearedFields, metric.FieldReceivedAt) } // SetPushedAt sets the "pushed_at" field. @@ -9114,9 +9101,6 @@ func (m *MetricMutation) AddField(name string, value ent.Value) error { // mutation. func (m *MetricMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(metric.FieldReceivedAt) { - fields = append(fields, metric.FieldReceivedAt) - } if m.FieldCleared(metric.FieldPushedAt) { fields = append(fields, metric.FieldPushedAt) } @@ -9134,9 +9118,6 @@ func (m *MetricMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. 
func (m *MetricMutation) ClearField(name string) error { switch name { - case metric.FieldReceivedAt: - m.ClearReceivedAt() - return nil case metric.FieldPushedAt: m.ClearPushedAt() return nil diff --git a/pkg/database/ent/runtime.go b/pkg/database/ent/runtime.go index 569c043503a..15413490633 100644 --- a/pkg/database/ent/runtime.go +++ b/pkg/database/ent/runtime.go @@ -13,7 +13,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" - "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" "github.com/crowdsecurity/crowdsec/pkg/database/ent/schema" ) @@ -171,10 +170,4 @@ func init() { metaDescValue := metaFields[3].Descriptor() // meta.ValueValidator is a validator for the "value" field. It is called by the builders before save. meta.ValueValidator = metaDescValue.Validators[0].(func(string) error) - metricFields := schema.Metric{}.Fields() - _ = metricFields - // metricDescReceivedAt is the schema descriptor for received_at field. - metricDescReceivedAt := metricFields[2].Descriptor() - // metric.DefaultReceivedAt holds the default value on creation for the received_at field. - metric.DefaultReceivedAt = metricDescReceivedAt.Default.(func() time.Time) } diff --git a/pkg/database/ent/schema/metric.go b/pkg/database/ent/schema/metric.go index d02a7685356..319c67b7aa7 100644 --- a/pkg/database/ent/schema/metric.go +++ b/pkg/database/ent/schema/metric.go @@ -3,7 +3,6 @@ package schema import ( "entgo.io/ent" "entgo.io/ent/schema/field" - "github.com/crowdsecurity/crowdsec/pkg/types" ) // Metric is actually a set of metrics collected by a device @@ -23,9 +22,6 @@ func (Metric) Fields() []ent.Field { Comment("Source of the metrics: machine id, bouncer name...\nIt must come from the auth middleware."), field.Time("received_at"). Immutable(). - Default(types.UtcNow). - Nillable(). - Optional(). 
Comment("When the metrics are received by LAPI"), field.Time("pushed_at"). Nillable().