Skip to content

Commit

Permalink
vmpodscrape/vmservicescrape/vmnodescrape/vmstaticscrape: support max_scrape_size option
Browse files Browse the repository at this point in the history
  • Loading branch information
AndrewChubatiuk committed Jun 21, 2024
1 parent e9b2e9c commit bc838c7
Show file tree
Hide file tree
Showing 19 changed files with 54 additions and 2 deletions.
3 changes: 3 additions & 0 deletions api/v1beta1/vmnodescrape_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,9 @@ type VMNodeScrapeSpec struct {
// Timeout after which the scrape is ended
// +optional
ScrapeTimeout string `json:"scrapeTimeout,omitempty"`
// MaxScrapeSize defines a maximum size of scraped data for a job
// +optional
MaxScrapeSize string `json:"max_scrape_size,omitempty"`
// OAuth2 defines auth configuration
// +optional
OAuth2 *OAuth2 `json:"oauth2,omitempty"`
Expand Down
3 changes: 3 additions & 0 deletions api/v1beta1/vmpodscrape_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,9 @@ type PodMetricsEndpoint struct {
// Timeout after which the scrape is ended
// +optional
ScrapeTimeout string `json:"scrapeTimeout,omitempty"`
// MaxScrapeSize defines a maximum size of scraped data for a job
// +optional
MaxScrapeSize string `json:"max_scrape_size,omitempty"`
// SampleLimit defines per-podEndpoint limit on number of scraped samples that will be accepted.
// +optional
SampleLimit uint64 `json:"sampleLimit,omitempty"`
Expand Down
3 changes: 3 additions & 0 deletions api/v1beta1/vmscrapeconfig_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,9 @@ type VMScrapeConfigSpec struct {
// ScrapeTimeout is the number of seconds to wait until a scrape request times out.
// +optional
ScrapeTimeout string `json:"scrapeTimeout,omitempty"`
// MaxScrapeSize defines a maximum size of scraped data for a job
// +optional
MaxScrapeSize string `json:"max_scrape_size,omitempty"`
// HonorTimestamps controls whether to respect the timestamps present in scraped data.
// +optional
HonorTimestamps *bool `json:"honorTimestamps,omitempty"`
Expand Down
3 changes: 3 additions & 0 deletions api/v1beta1/vmservicescrape_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -143,6 +143,9 @@ type Endpoint struct {
// Timeout after which the scrape is ended
// +optional
ScrapeTimeout string `json:"scrapeTimeout,omitempty"`
// MaxScrapeSize defines a maximum size of scraped data for a job
// +optional
MaxScrapeSize string `json:"max_scrape_size,omitempty"`
// SampleLimit defines per-endpoint limit on number of scraped samples that will be accepted.
// +optional
SampleLimit uint64 `json:"sampleLimit,omitempty"`
Expand Down
3 changes: 3 additions & 0 deletions api/v1beta1/vmstaticscrape_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,9 @@ type TargetEndpoint struct {
// Timeout after which the scrape is ended
// +optional
ScrapeTimeout string `json:"scrapeTimeout,omitempty"`
// MaxScrapeSize defines a maximum size of scraped data for a job
// +optional
MaxScrapeSize string `json:"max_scrape_size,omitempty"`
// OAuth2 defines auth configuration
// +optional
OAuth2 *OAuth2 `json:"oauth2,omitempty"`
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -165,6 +165,9 @@ spec:
jobLabel:
description: The label to use to retrieve the job name from.
type: string
max_scrape_size:
description: MaxScrapeSize defines a maximum size of scraped data for a job
type: string
metricRelabelConfigs:
description: MetricRelabelConfigs to apply to samples before ingestion.
items:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -208,6 +208,9 @@ spec:
interval:
description: Interval at which metrics should be scraped
type: string
max_scrape_size:
description: MaxScrapeSize defines a maximum size of scraped data for a job
type: string
metricRelabelConfigs:
description: MetricRelabelConfigs to apply to samples before ingestion.
items:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2359,6 +2359,9 @@ spec:
- role
type: object
type: array
max_scrape_size:
description: MaxScrapeSize defines a maximum size of scraped data for a job
type: string
metricRelabelConfigs:
description: MetricRelabelConfigs to apply to samples before ingestion.
items:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -198,6 +198,9 @@ spec:
interval:
description: Interval at which metrics should be scraped
type: string
max_scrape_size:
description: MaxScrapeSize defines a maximum size of scraped data for a job
type: string
metricRelabelConfigs:
description: MetricRelabelConfigs to apply to samples before ingestion.
items:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -182,6 +182,9 @@ spec:
type: string
description: Labels static labels for targets.
type: object
max_scrape_size:
description: MaxScrapeSize defines a maximum size of scraped data for a job
type: string
metricRelabelConfigs:
description: MetricRelabelConfigs to apply to samples before ingestion.
items:
Expand Down
2 changes: 2 additions & 0 deletions docs/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@ aliases:

- [vmalertmanagerconfig](./api.md#vmalertmanagerconfig): adds missing `handleReconcileErr` callback to the reconcile loop. It must properly handle errors and deregister objects.

- [vmservicescrape](./api.md#vmservicescrape), [vmpodscrape](./api.md#vmpodscrape), [vmnodescrape](./api.md#vmnodescrape), [vmstaticscrape](./api.md#vmstaticscrape), [vmscrapeconfig](./api.md#vmscrapeconfig) - added `max_scrape_size` parameter, which defines a maximum size of scraped data for a job

<a name="v0.45.0"></a>

## [v0.45.0](https://github.com/VictoriaMetrics/operator/releases/tag/v0.45.0) - 10 Jun 2024
Expand Down
3 changes: 3 additions & 0 deletions internal/controller/factory/vmagent/nodescrape.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,9 @@ func generateNodeScrapeConfig(
if nodeSpec.ScrapeTimeout != "" {
cfg = append(cfg, yaml.MapItem{Key: "scrape_timeout", Value: nodeSpec.ScrapeTimeout})
}
if nodeSpec.MaxScrapeSize != "" {
cfg = append(cfg, yaml.MapItem{Key: "max_scrape_size", Value: nodeSpec.MaxScrapeSize})
}
if nodeSpec.Path != "" {
cfg = append(cfg, yaml.MapItem{Key: "metrics_path", Value: nodeSpec.Path})
}
Expand Down
2 changes: 2 additions & 0 deletions internal/controller/factory/vmagent/nodescrape_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,7 @@ relabel_configs:
FollowRedirects: pointer.Bool(true),
ScrapeTimeout: "10s",
ScrapeInterval: "5s",
MaxScrapeSize: "10MB",
Params: map[string][]string{"module": {"client"}},
JobLabel: "env",
HonorTimestamps: pointer.Bool(true),
Expand Down Expand Up @@ -135,6 +136,7 @@ kubernetes_sd_configs:
- role: node
scrape_interval: 5s
scrape_timeout: 10s
max_scrape_size: 10MB
metrics_path: /metrics
proxy_url: https://some-url
sample_limit: 50
Expand Down
3 changes: 3 additions & 0 deletions internal/controller/factory/vmagent/podscrape.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,9 @@ func generatePodScrapeConfig(
if ep.ScrapeTimeout != "" {
cfg = append(cfg, yaml.MapItem{Key: "scrape_timeout", Value: ep.ScrapeTimeout})
}
if ep.MaxScrapeSize != "" {
cfg = append(cfg, yaml.MapItem{Key: "max_scrape_size", Value: ep.MaxScrapeSize})
}
if ep.Path != "" {
cfg = append(cfg, yaml.MapItem{Key: "metrics_path", Value: ep.Path})
}
Expand Down
6 changes: 4 additions & 2 deletions internal/controller/factory/vmagent/podscrape_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,8 +39,9 @@ func Test_generatePodScrapeConfig(t *testing.T) {
},
},
ep: vmv1beta1.PodMetricsEndpoint{
Path: "/metric",
Port: "web",
MaxScrapeSize: "30B",
Path: "/metric",
Port: "web",
AttachMetadata: vmv1beta1.AttachMetadata{
Node: pointer.Bool(true),
},
Expand All @@ -56,6 +57,7 @@ kubernetes_sd_configs:
namespaces:
names:
- default
max_scrape_size: 30B
metrics_path: /metric
relabel_configs:
- action: drop
Expand Down
3 changes: 3 additions & 0 deletions internal/controller/factory/vmagent/servicescrape.go
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,9 @@ func generateServiceScrapeConfig(
if ep.ScrapeTimeout != "" {
cfg = append(cfg, yaml.MapItem{Key: "scrape_timeout", Value: ep.ScrapeTimeout})
}
if ep.MaxScrapeSize != "" {
cfg = append(cfg, yaml.MapItem{Key: "max_scrape_size", Value: ep.MaxScrapeSize})
}
if ep.Path != "" {
cfg = append(cfg, yaml.MapItem{Key: "metrics_path", Value: ep.Path})
}
Expand Down
2 changes: 2 additions & 0 deletions internal/controller/factory/vmagent/servicescrape_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -185,6 +185,7 @@ relabel_configs:
},
BearerTokenFile: "/var/run/tolen",
ScrapeInterval: "60m",
MaxScrapeSize: "1MB",
},
i: 0,
apiserverConfig: nil,
Expand All @@ -202,6 +203,7 @@ kubernetes_sd_configs:
names:
- default
scrape_interval: 40m
max_scrape_size: 1MB
tls_config:
insecure_skip_verify: false
ca_file: /etc/vmagent-tls/certs/default_tls-secret_ca
Expand Down
3 changes: 3 additions & 0 deletions internal/controller/factory/vmagent/staticscrape.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,9 @@ func generateStaticScrapeConfig(
if ep.ScrapeTimeout != "" {
cfg = append(cfg, yaml.MapItem{Key: "scrape_timeout", Value: ep.ScrapeTimeout})
}
if ep.MaxScrapeSize != "" {
cfg = append(cfg, yaml.MapItem{Key: "max_scrape_size", Value: ep.MaxScrapeSize})
}
if ep.Path != "" {
cfg = append(cfg, yaml.MapItem{Key: "metrics_path", Value: ep.Path})
}
Expand Down
2 changes: 2 additions & 0 deletions internal/controller/factory/vmagent/staticscrape_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -156,6 +156,7 @@ metric_relabel_configs: []
InsecureSkipVerify: true,
},
ScrapeTimeout: "55s",
MaxScrapeSize: "2KB",
Interval: "10s",
ScrapeInterval: "50s",
FollowRedirects: pointer.Bool(true),
Expand Down Expand Up @@ -245,6 +246,7 @@ static_configs:
group: prod
scrape_interval: 50s
scrape_timeout: 55s
max_scrape_size: 2KB
metrics_path: /metrics-1
proxy_url: https://some-proxy
follow_redirects: true
Expand Down

0 comments on commit bc838c7

Please sign in to comment.