diff --git a/cloudflare.go b/cloudflare.go
index f1a3037..106a447 100644
--- a/cloudflare.go
+++ b/cloudflare.go
@@ -221,6 +221,11 @@ type lbResp struct {
 	ZoneTag string `json:"zoneTag"`
 }
 
+func getTruncatedNow() time.Time {
+	// truncate datetime down to YYYY-mm-dd HH:MM:00.000
+	return time.Now().Add(-time.Duration(cfgScrapeDelay) * time.Second).Truncate(time.Minute).UTC()
+}
+
 func fetchZones() []cloudflare.Zone {
 	var api *cloudflare.API
 	var err error
@@ -263,18 +268,15 @@ func fetchAccounts() []cloudflare.Account {
 	return a
 }
 
-func fetchZoneTotals(zoneIDs []string) (*cloudflareResponse, error) {
-	now := time.Now().Add(-time.Duration(cfgScrapeDelay) * time.Second).UTC()
-	s := 60 * time.Second
-	now = now.Truncate(s)
-	now1mAgo := now.Add(-60 * time.Second)
+func fetchZoneTotals(zoneIDs []string, lastSuccessfulTime *time.Time) (*cloudflareResponse, error) {
+	truncatedNow := getTruncatedNow()
 
 	request := graphql.NewRequest(`
 query ($zoneIDs: [String!], $mintime: Time!, $maxtime: Time!, $limit: Int!) {
 	viewer {
 		zones(filter: { zoneTag_in: $zoneIDs }) {
 			zoneTag
-			httpRequests1mGroups(limit: $limit filter: { datetime: $maxtime }) {
+			httpRequests1mGroups(limit: $limit filter: { datetime_geq: $mintime, datetime_lt: $maxtime }) {
 				uniq {
 					uniques
 				}
@@ -372,8 +374,8 @@ query ($zoneIDs: [String!], $mintime: Time!, $maxtime: Time!, $limit: Int!) {
 		request.Header.Set("X-AUTH-KEY", cfgCfAPIKey)
 	}
 	request.Var("limit", 9999)
-	request.Var("maxtime", now)
-	request.Var("mintime", now1mAgo)
+	request.Var("maxtime", truncatedNow)
+	request.Var("mintime", *lastSuccessfulTime)
 	request.Var("zoneIDs", zoneIDs)
 
 	ctx := context.Background()
@@ -381,18 +383,16 @@ query ($zoneIDs: [String!], $mintime: Time!, $maxtime: Time!, $limit: Int!) {
 	var resp cloudflareResponse
 	if err := graphqlClient.Run(ctx, request, &resp); err != nil {
-		log.Error(err)
+		log.Errorf("%s: from %s to %s", err, *lastSuccessfulTime, truncatedNow)
 		return nil, err
 	}
-
+	log.Debugf("successful from %s to %s", *lastSuccessfulTime, truncatedNow)
+	*lastSuccessfulTime = truncatedNow
 	return &resp, nil
 }
 
-func fetchColoTotals(zoneIDs []string) (*cloudflareResponseColo, error) {
-	now := time.Now().Add(-time.Duration(cfgScrapeDelay) * time.Second).UTC()
-	s := 60 * time.Second
-	now = now.Truncate(s)
-	now1mAgo := now.Add(-60 * time.Second)
+func fetchColoTotals(zoneIDs []string, lastSuccessfulTime *time.Time) (*cloudflareResponseColo, error) {
+	truncatedNow := getTruncatedNow()
 
 	request := graphql.NewRequest(`
 query ($zoneIDs: [String!], $mintime: Time!, $maxtime: Time!, $limit: Int!)
 {
@@ -428,26 +428,24 @@ func fetchColoTotals(zoneIDs []string) (*cloudflareResponseColo, error) {
 		request.Header.Set("X-AUTH-KEY", cfgCfAPIKey)
 	}
 	request.Var("limit", 9999)
-	request.Var("maxtime", now)
-	request.Var("mintime", now1mAgo)
+	request.Var("maxtime", truncatedNow)
+	request.Var("mintime", *lastSuccessfulTime)
 	request.Var("zoneIDs", zoneIDs)
 
 	ctx := context.Background()
 	graphqlClient := graphql.NewClient(cfGraphQLEndpoint)
 	var resp cloudflareResponseColo
 	if err := graphqlClient.Run(ctx, request, &resp); err != nil {
-		log.Error(err)
+		log.Errorf("%s: from %s to %s", err, *lastSuccessfulTime, truncatedNow)
 		return nil, err
 	}
-
+	log.Debugf("successful from %s to %s", *lastSuccessfulTime, truncatedNow)
+	*lastSuccessfulTime = truncatedNow
 	return &resp, nil
 }
 
-func fetchWorkerTotals(accountID string) (*cloudflareResponseAccts, error) {
-	now := time.Now().Add(-time.Duration(cfgScrapeDelay) * time.Second).UTC()
-	s := 60 * time.Second
-	now = now.Truncate(s)
-	now1mAgo := now.Add(-60 * time.Second)
+func fetchWorkerTotals(accountID string, lastSuccessfulTime *time.Time) (*cloudflareResponseAccts, error) {
+	truncatedNow := getTruncatedNow()
 
 	request := graphql.NewRequest(`
 query ($accountID: String!, $mintime: Time!, $maxtime: Time!, $limit: Int!) {
@@ -488,26 +486,24 @@ func fetchWorkerTotals(accountID string) (*cloudflareResponseAccts, error) {
 		request.Header.Set("X-AUTH-KEY", cfgCfAPIKey)
 	}
 	request.Var("limit", 9999)
-	request.Var("maxtime", now)
-	request.Var("mintime", now1mAgo)
+	request.Var("maxtime", truncatedNow)
+	request.Var("mintime", *lastSuccessfulTime)
 	request.Var("accountID", accountID)
 
 	ctx := context.Background()
 	graphqlClient := graphql.NewClient(cfGraphQLEndpoint)
 	var resp cloudflareResponseAccts
 	if err := graphqlClient.Run(ctx, request, &resp); err != nil {
-		log.Error(err)
+		log.Errorf("%s: from %s to %s", err, *lastSuccessfulTime, truncatedNow)
 		return nil, err
 	}
-
+	log.Debugf("successful from %s to %s", *lastSuccessfulTime, truncatedNow)
+	*lastSuccessfulTime = truncatedNow
 	return &resp, nil
 }
 
-func fetchLoadBalancerTotals(zoneIDs []string) (*cloudflareResponseLb, error) {
-	now := time.Now().Add(-time.Duration(cfgScrapeDelay) * time.Second).UTC()
-	s := 60 * time.Second
-	now = now.Truncate(s)
-	now1mAgo := now.Add(-60 * time.Second)
+func fetchLoadBalancerTotals(zoneIDs []string, lastSuccessfulTime *time.Time) (*cloudflareResponseLb, error) {
+	truncatedNow := getTruncatedNow()
 
 	request := graphql.NewRequest(`
 query ($zoneIDs: [String!], $mintime: Time!, $maxtime: Time!, $limit: Int!)
 {
@@ -565,17 +561,19 @@ func fetchLoadBalancerTotals(zoneIDs []string) (*cloudflareResponseLb, error) {
 		request.Header.Set("X-AUTH-KEY", cfgCfAPIKey)
 	}
 	request.Var("limit", 9999)
-	request.Var("maxtime", now)
-	request.Var("mintime", now1mAgo)
+	request.Var("maxtime", truncatedNow)
+	request.Var("mintime", *lastSuccessfulTime)
 	request.Var("zoneIDs", zoneIDs)
 
 	ctx := context.Background()
 	graphqlClient := graphql.NewClient(cfGraphQLEndpoint)
 	var resp cloudflareResponseLb
 	if err := graphqlClient.Run(ctx, request, &resp); err != nil {
-		log.Error(err)
+		log.Errorf("%s: from %s to %s", err, *lastSuccessfulTime, truncatedNow)
 		return nil, err
 	}
+	log.Debugf("successful from %s to %s", *lastSuccessfulTime, truncatedNow)
+	*lastSuccessfulTime = truncatedNow
 
 	return &resp, nil
 }
diff --git a/main.go b/main.go
index d07c180..ab0f170 100644
--- a/main.go
+++ b/main.go
@@ -26,6 +26,22 @@ var (
 	cfgFreeTier        = false
 	cfgBatchSize       = 10
 	cfgMetricsDenylist = ""
+	cfgLogLevel        = "info"
+)
+
+type AccountJob struct {
+	Account            cloudflare.Account
+	LastSuccessfulTime time.Time
+}
+
+type BatchedZonesJob struct {
+	Zones              []cloudflare.Zone
+	LastSuccessfulTime time.Time
+}
+
+var (
+	cfAccounts      []cloudflare.Account
+	cfFilteredZones []cloudflare.Zone
 )
 
 func getTargetZones() []string {
@@ -100,16 +116,34 @@ func filterExcludedZones(all []cloudflare.Zone, exclude []string) []cloudflare.Z
 	return filtered
 }
 
-func fetchMetrics() {
+func fetchMetrics(fetchWorkerAnalyticsJobs []AccountJob, fetchZoneAnalyticsJobs []BatchedZonesJob, fetchZoneColocationAnalyticsJobs []BatchedZonesJob, fetchLoadBalancerAnalyticsJobs []BatchedZonesJob) {
 	var wg sync.WaitGroup
-	zones := fetchZones()
-	accounts := fetchAccounts()
-	filteredZones := filterExcludedZones(filterZones(zones, getTargetZones()), getExcludedZones())
 
-	for _, a := range accounts {
-		go fetchWorkerAnalytics(a, &wg)
+	for i := range fetchWorkerAnalyticsJobs {
+		job := &fetchWorkerAnalyticsJobs[i]
+		go fetchWorkerAnalytics(job.Account, &wg, &job.LastSuccessfulTime)
+	}
+
+	for i := range fetchZoneAnalyticsJobs {
+		job := &fetchZoneAnalyticsJobs[i]
+		go fetchZoneAnalytics(job.Zones, &wg, &job.LastSuccessfulTime)
+	}
+	for i := range fetchZoneColocationAnalyticsJobs {
+		job := &fetchZoneColocationAnalyticsJobs[i]
+		go fetchZoneColocationAnalytics(job.Zones, &wg, &job.LastSuccessfulTime)
+	}
+	for i := range fetchLoadBalancerAnalyticsJobs {
+		job := &fetchLoadBalancerAnalyticsJobs[i]
+		go fetchLoadBalancerAnalytics(job.Zones, &wg, &job.LastSuccessfulTime)
 	}
+	wg.Wait()
+}
+
+func prepareExporterBatchJobs() []BatchedZonesJob {
+	jobs := []BatchedZonesJob{}
+	filteredZones := cfFilteredZones
+	lastNow := getTruncatedNow().Add(-time.Minute)
 
 	// Make requests in groups of cfgBatchSize to avoid rate limit
 	// 10 is the maximum amount of zones you can request at once
 	for len(filteredZones) > 0 {
@@ -117,16 +151,27 @@
 		if len(filteredZones) < cfgBatchSize {
 			sliceLength = len(filteredZones)
 		}
-
 		targetZones := filteredZones[:sliceLength]
-		filteredZones = filteredZones[len(targetZones):]
-
-		go fetchZoneAnalytics(targetZones, &wg)
-		go fetchZoneColocationAnalytics(targetZones, &wg)
-		go fetchLoadBalancerAnalytics(targetZones, &wg)
+		jobs = append(jobs, BatchedZonesJob{
+			Zones:              targetZones,
+			LastSuccessfulTime: lastNow,
+		})
+		filteredZones = filteredZones[sliceLength:]
 	}
+	return jobs
+}
 
-	wg.Wait()
+func prepareAccountJobs() []AccountJob {
+	jobs := []AccountJob{}
+	lastSuccessfulTime := getTruncatedNow().Add(-time.Minute)
+	for i := range cfAccounts {
+		cfAccount := &cfAccounts[i]
+		jobs = append(jobs, AccountJob{
+			Account:            *cfAccount,
+			LastSuccessfulTime: lastSuccessfulTime,
+		})
+	}
+	return jobs
 }
 
 func main() {
@@ -141,6 +186,7 @@
 	flag.IntVar(&cfgBatchSize, "cf_batch_size", cfgBatchSize, "cloudflare zones batch size (1-10), defaults to 10")
 	flag.BoolVar(&cfgFreeTier, "free_tier", cfgFreeTier, "scrape only metrics included in free plan")
 	flag.StringVar(&cfgMetricsDenylist, "metrics_denylist", cfgMetricsDenylist, "metrics to not expose, comma delimited list")
+	flag.StringVar(&cfgLogLevel, "log_level", cfgLogLevel, "log level, defaults to info")
 	flag.Parse()
 	if !(len(cfgCfAPIToken) > 0 || (len(cfgCfAPIEmail) > 0 && len(cfgCfAPIKey) > 0)) {
 		log.Fatal("Please provide CF_API_KEY+CF_API_EMAIL or CF_API_TOKEN")
@@ -148,10 +194,13 @@
 	if cfgBatchSize < 1 || cfgBatchSize > 10 {
 		log.Fatal("CF_BATCH_SIZE must be between 1 and 10")
 	}
-	customFormatter := new(log.TextFormatter)
-	customFormatter.TimestampFormat = "2006-01-02 15:04:05"
+	customFormatter := new(log.JSONFormatter)
+	customFormatter.TimestampFormat = "2006-01-02 15:04:05.000"
 	log.SetFormatter(customFormatter)
-	customFormatter.FullTimestamp = true
+	log.SetReportCaller(true)
+	if parsedLogLevel, err := log.ParseLevel(cfgLogLevel); err == nil {
+		log.SetLevel(parsedLogLevel)
+	}
 
 	metricsDenylist := []string{}
 	if len(cfgMetricsDenylist) > 0 {
@@ -163,9 +212,16 @@
 	}
 
 	mustRegisterMetrics(deniedMetricsSet)
+	cfAccounts = fetchAccounts()
+	cfFilteredZones = filterExcludedZones(filterZones(fetchZones(), getTargetZones()), getExcludedZones())
 
 	go func() {
-		for ; true; <-time.NewTicker(60 * time.Second).C {
-			go fetchMetrics()
+		fetchWorkerAnalyticsJobs := prepareAccountJobs()
+		fetchZoneAnalyticsJobs := prepareExporterBatchJobs()
+		fetchZoneColocationAnalyticsJobs := prepareExporterBatchJobs()
+		fetchLoadBalancerAnalyticsJobs := prepareExporterBatchJobs()
+
+		for ; true; <-time.NewTicker(time.Minute).C {
+			go fetchMetrics(fetchWorkerAnalyticsJobs, fetchZoneAnalyticsJobs, fetchZoneColocationAnalyticsJobs, fetchLoadBalancerAnalyticsJobs)
 		}
 	}()
diff --git a/prometheus.go b/prometheus.go
index d10ef20..4d0fc49 100644
--- a/prometheus.go
+++ b/prometheus.go
@@ -4,10 +4,12 @@ import (
 	"fmt"
 	"strconv"
 	"sync"
+	"time"
 
 	"github.com/biter777/countries"
 	cloudflare "github.com/cloudflare/cloudflare-go"
 	"github.com/prometheus/client_golang/prometheus"
+	log "github.com/sirupsen/logrus"
 )
 
 type MetricName string
@@ -385,11 +387,11 @@ func mustRegisterMetrics(deniedMetrics MetricsSet) {
 	}
 }
 
-func fetchWorkerAnalytics(account cloudflare.Account, wg *sync.WaitGroup) {
+func fetchWorkerAnalytics(account cloudflare.Account, wg *sync.WaitGroup, lastSuccessfulTime *time.Time) {
 	wg.Add(1)
 	defer wg.Done()
 
-	r, err := fetchWorkerTotals(account.ID)
+	r, err := fetchWorkerTotals(account.ID, lastSuccessfulTime)
 	if err != nil {
 		return
 	}
@@ -410,7 +412,7 @@ func fetchWorkerAnalytics(account cloudflare.Account, wg *sync.WaitGroup) {
 	}
 }
 
-func fetchZoneColocationAnalytics(zones []cloudflare.Zone, wg *sync.WaitGroup) {
+func fetchZoneColocationAnalytics(zones []cloudflare.Zone, wg *sync.WaitGroup, lastSuccessfulTime *time.Time) {
 	wg.Add(1)
 	defer wg.Done()
 
@@ -424,7 +426,7 @@ func fetchZoneColocationAnalytics(zones []cloudflare.Zone, wg *sync.WaitGroup) {
 		return
 	}
 
-	r, err := fetchColoTotals(zoneIDs)
+	r, err := fetchColoTotals(zoneIDs, lastSuccessfulTime)
 	if err != nil {
 		return
 	}
@@ -441,7 +443,7 @@ func fetchZoneColocationAnalytics(zones []cloudflare.Zone, wg *sync.WaitGroup) {
 	}
 }
 
-func fetchZoneAnalytics(zones []cloudflare.Zone, wg *sync.WaitGroup) {
+func fetchZoneAnalytics(zones []cloudflare.Zone, wg *sync.WaitGroup, lastSuccessfulTime *time.Time) {
 	wg.Add(1)
 	defer wg.Done()
 
@@ -455,129 +457,126 @@ func fetchZoneAnalytics(zones []cloudflare.Zone, wg *sync.WaitGroup) {
 		return
 	}
 
-	r, err := fetchZoneTotals(zoneIDs)
+	r, err := fetchZoneTotals(zoneIDs, lastSuccessfulTime)
 	if err != nil {
 		return
 	}
-	for _, z := range r.Viewer.Zones {
-		name := findZoneName(zones, z.ZoneTag)
-		addHTTPGroups(&z, name)
-		addFirewallGroups(&z, name)
-		addHealthCheckGroups(&z, name)
-		addHTTPAdaptiveGroups(&z, name)
+	for i := range r.Viewer.Zones {
+		zone := &r.Viewer.Zones[i]
+		name := findZoneName(zones, zone.ZoneTag)
+		addHTTPGroups(zone, name)
+		addFirewallGroups(zone, name)
+		addHealthCheckGroups(zone, name)
+		addHTTPAdaptiveGroups(zone, name)
 	}
 }
 
 func addHTTPGroups(z *zoneResp, name string) {
-	// Nothing to do.
-	if len(z.HTTP1mGroups) == 0 {
-		return
-	}
-	zt := z.HTTP1mGroups[0]
+	log.Debug("len(z.HTTP1mGroups) = ", len(z.HTTP1mGroups))
+	for i := range z.HTTP1mGroups {
+		zt := &z.HTTP1mGroups[i]
 
-	zoneRequestTotal.With(prometheus.Labels{"zone": name}).Add(float64(zt.Sum.Requests))
-	zoneRequestCached.With(prometheus.Labels{"zone": name}).Add(float64(zt.Sum.CachedRequests))
-	zoneRequestSSLEncrypted.With(prometheus.Labels{"zone": name}).Add(float64(zt.Sum.EncryptedRequests))
+		zoneRequestTotal.With(prometheus.Labels{"zone": name}).Add(float64(zt.Sum.Requests))
+		zoneRequestCached.With(prometheus.Labels{"zone": name}).Add(float64(zt.Sum.CachedRequests))
+		zoneRequestSSLEncrypted.With(prometheus.Labels{"zone": name}).Add(float64(zt.Sum.EncryptedRequests))
 
-	for _, ct := range zt.Sum.ContentType {
-		zoneRequestContentType.With(prometheus.Labels{"zone": name, "content_type": ct.EdgeResponseContentType}).Add(float64(ct.Requests))
-		zoneBandwidthContentType.With(prometheus.Labels{"zone": name, "content_type": ct.EdgeResponseContentType}).Add(float64(ct.Bytes))
-	}
+		for _, ct := range zt.Sum.ContentType {
+			zoneRequestContentType.With(prometheus.Labels{"zone": name, "content_type": ct.EdgeResponseContentType}).Add(float64(ct.Requests))
+			zoneBandwidthContentType.With(prometheus.Labels{"zone": name, "content_type": ct.EdgeResponseContentType}).Add(float64(ct.Bytes))
+		}
 
-	for _, country := range zt.Sum.Country {
-		c := countries.ByName(country.ClientCountryName)
-		region := c.Info().Region.Info().Name
+		for _, country := range zt.Sum.Country {
+			c := countries.ByName(country.ClientCountryName)
+			region := c.Info().Region.Info().Name
 
-		zoneRequestCountry.With(prometheus.Labels{"zone": name, "country": country.ClientCountryName, "region": region}).Add(float64(country.Requests))
-		zoneBandwidthCountry.With(prometheus.Labels{"zone": name, "country": country.ClientCountryName, "region": region}).Add(float64(country.Bytes))
-		zoneThreatsCountry.With(prometheus.Labels{"zone": name, "country": country.ClientCountryName, "region": region}).Add(float64(country.Threats))
-	}
+			zoneRequestCountry.With(prometheus.Labels{"zone": name, "country": country.ClientCountryName, "region": region}).Add(float64(country.Requests))
+			zoneBandwidthCountry.With(prometheus.Labels{"zone": name, "country": country.ClientCountryName, "region": region}).Add(float64(country.Bytes))
+			zoneThreatsCountry.With(prometheus.Labels{"zone": name, "country": country.ClientCountryName, "region": region}).Add(float64(country.Threats))
+		}
 
-	for _, status := range zt.Sum.ResponseStatus {
-		zoneRequestHTTPStatus.With(prometheus.Labels{"zone": name, "status": strconv.Itoa(status.EdgeResponseStatus)}).Add(float64(status.Requests))
-	}
+		for _, status := range zt.Sum.ResponseStatus {
+			zoneRequestHTTPStatus.With(prometheus.Labels{"zone": name, "status": strconv.Itoa(status.EdgeResponseStatus)}).Add(float64(status.Requests))
+		}
 
-	for _, browser := range zt.Sum.BrowserMap {
-		zoneRequestBrowserMap.With(prometheus.Labels{"zone": name, "family": browser.UaBrowserFamily}).Add(float64(browser.PageViews))
-	}
+		for _, browser := range zt.Sum.BrowserMap {
+			zoneRequestBrowserMap.With(prometheus.Labels{"zone": name, "family": browser.UaBrowserFamily}).Add(float64(browser.PageViews))
+		}
 
-	zoneBandwidthTotal.With(prometheus.Labels{"zone": name}).Add(float64(zt.Sum.Bytes))
-	zoneBandwidthCached.With(prometheus.Labels{"zone": name}).Add(float64(zt.Sum.CachedBytes))
-	zoneBandwidthSSLEncrypted.With(prometheus.Labels{"zone": name}).Add(float64(zt.Sum.EncryptedBytes))
+		zoneBandwidthTotal.With(prometheus.Labels{"zone": name}).Add(float64(zt.Sum.Bytes))
+		zoneBandwidthCached.With(prometheus.Labels{"zone": name}).Add(float64(zt.Sum.CachedBytes))
+		zoneBandwidthSSLEncrypted.With(prometheus.Labels{"zone": name}).Add(float64(zt.Sum.EncryptedBytes))
 
-	zoneThreatsTotal.With(prometheus.Labels{"zone": name}).Add(float64(zt.Sum.Threats))
+		zoneThreatsTotal.With(prometheus.Labels{"zone": name}).Add(float64(zt.Sum.Threats))
 
-	for _, t := range zt.Sum.ThreatPathing {
-		zoneThreatsType.With(prometheus.Labels{"zone": name, "type": t.Name}).Add(float64(t.Requests))
-	}
+		for _, t := range zt.Sum.ThreatPathing {
+			zoneThreatsType.With(prometheus.Labels{"zone": name, "type": t.Name}).Add(float64(t.Requests))
+		}
 
-	zonePageviewsTotal.With(prometheus.Labels{"zone": name}).Add(float64(zt.Sum.PageViews))
+		zonePageviewsTotal.With(prometheus.Labels{"zone": name}).Add(float64(zt.Sum.PageViews))
 
-	// Uniques
-	zoneUniquesTotal.With(prometheus.Labels{"zone": name}).Add(float64(zt.Unique.Uniques))
+		// Uniques
+		zoneUniquesTotal.With(prometheus.Labels{"zone": name}).Add(float64(zt.Unique.Uniques))
+	}
 }
 
 func addFirewallGroups(z *zoneResp, name string) {
-	// Nothing to do.
-	if len(z.FirewallEventsAdaptiveGroups) == 0 {
-		return
-	}
-
-	for _, g := range z.FirewallEventsAdaptiveGroups {
+	log.Debug("len(z.FirewallEventsAdaptiveGroups) = ", len(z.FirewallEventsAdaptiveGroups))
+	for i := range z.FirewallEventsAdaptiveGroups {
+		firewallEventAdaptiveGroup := &z.FirewallEventsAdaptiveGroups[i]
 		zoneFirewallEventsCount.With(
 			prometheus.Labels{
 				"zone":    name,
-				"action":  g.Dimensions.Action,
-				"source":  g.Dimensions.Source,
-				"host":    g.Dimensions.ClientRequestHTTPHost,
-				"country": g.Dimensions.ClientCountryName,
-			}).Add(float64(g.Count))
+				"action":  firewallEventAdaptiveGroup.Dimensions.Action,
+				"source":  firewallEventAdaptiveGroup.Dimensions.Source,
+				"host":    firewallEventAdaptiveGroup.Dimensions.ClientRequestHTTPHost,
+				"country": firewallEventAdaptiveGroup.Dimensions.ClientCountryName,
+			}).Add(float64(firewallEventAdaptiveGroup.Count))
 	}
 }
 
 func addHealthCheckGroups(z *zoneResp, name string) {
-	if len(z.HealthCheckEventsAdaptiveGroups) == 0 {
-		return
-	}
-
-	for _, g := range z.HealthCheckEventsAdaptiveGroups {
+	log.Debug("len(z.HealthCheckEventsAdaptiveGroups) = ", len(z.HealthCheckEventsAdaptiveGroups))
+	for i := range z.HealthCheckEventsAdaptiveGroups {
+		healthCheckEventsAdaptiveGroup := &z.HealthCheckEventsAdaptiveGroups[i]
 		zoneHealthCheckEventsOriginCount.With(
 			prometheus.Labels{
 				"zone":          name,
-				"health_status": g.Dimensions.HealthStatus,
-				"origin_ip":     g.Dimensions.OriginIP,
-				"region":        g.Dimensions.Region,
-				"fqdn":          g.Dimensions.Fqdn,
-			}).Add(float64(g.Count))
+				"health_status": healthCheckEventsAdaptiveGroup.Dimensions.HealthStatus,
+				"origin_ip":     healthCheckEventsAdaptiveGroup.Dimensions.OriginIP,
+				"region":        healthCheckEventsAdaptiveGroup.Dimensions.Region,
+				"fqdn":          healthCheckEventsAdaptiveGroup.Dimensions.Fqdn,
+			}).Add(float64(healthCheckEventsAdaptiveGroup.Count))
 	}
 }
 
 func addHTTPAdaptiveGroups(z *zoneResp, name string) {
-
-	for _, g := range z.HTTPRequestsAdaptiveGroups {
+	log.Debug("len(z.HTTPRequestsAdaptiveGroups) = ", len(z.HTTPRequestsAdaptiveGroups))
+	for i := range z.HTTPRequestsAdaptiveGroups {
+		httpRequestsAdaptiveGroup := &z.HTTPRequestsAdaptiveGroups[i]
 		zoneRequestOriginStatusCountryHost.With(
 			prometheus.Labels{
 				"zone":    name,
-				"status":  strconv.Itoa(int(g.Dimensions.OriginResponseStatus)),
-				"country": g.Dimensions.ClientCountryName,
-				"host":    g.Dimensions.ClientRequestHTTPHost,
-			}).Add(float64(g.Count))
+				"status":  strconv.Itoa(int(httpRequestsAdaptiveGroup.Dimensions.OriginResponseStatus)),
+				"country": httpRequestsAdaptiveGroup.Dimensions.ClientCountryName,
+				"host":    httpRequestsAdaptiveGroup.Dimensions.ClientRequestHTTPHost,
+			}).Add(float64(httpRequestsAdaptiveGroup.Count))
 	}
 
-	for _, g := range z.HTTPRequestsEdgeCountryHost {
+	log.Debug("len(z.HTTPRequestsEdgeCountryHost) = ", len(z.HTTPRequestsEdgeCountryHost))
+	for i := range z.HTTPRequestsEdgeCountryHost {
+		httpRequestsEdgeCountryHost := &z.HTTPRequestsEdgeCountryHost[i]
 		zoneRequestStatusCountryHost.With(
 			prometheus.Labels{
 				"zone":    name,
-				"status":  strconv.Itoa(int(g.Dimensions.EdgeResponseStatus)),
-				"country": g.Dimensions.ClientCountryName,
-				"host":    g.Dimensions.ClientRequestHTTPHost,
-			}).Add(float64(g.Count))
+				"status":  strconv.Itoa(int(httpRequestsEdgeCountryHost.Dimensions.EdgeResponseStatus)),
+				"country": httpRequestsEdgeCountryHost.Dimensions.ClientCountryName,
+				"host":    httpRequestsEdgeCountryHost.Dimensions.ClientRequestHTTPHost,
+			}).Add(float64(httpRequestsEdgeCountryHost.Count))
 	}
-
 }
 
-func fetchLoadBalancerAnalytics(zones []cloudflare.Zone, wg *sync.WaitGroup) {
+func fetchLoadBalancerAnalytics(zones []cloudflare.Zone, wg *sync.WaitGroup, lastSuccessfulTime *time.Time) {
 	wg.Add(1)
 	defer wg.Done()
 
@@ -591,7 +590,7 @@ func fetchLoadBalancerAnalytics(zones []cloudflare.Zone, wg *sync.WaitGroup) {
 		return
 	}
 
-	l, err := fetchLoadBalancerTotals(zoneIDs)
+	l, err := fetchLoadBalancerTotals(zoneIDs, lastSuccessfulTime)
 	if err != nil {
 		return
 	}
@@ -603,7 +602,7 @@ func fetchLoadBalancerAnalytics(zones []cloudflare.Zone, wg *sync.WaitGroup) {
 }
 
 func addLoadBalancingRequestsAdaptiveGroups(z *lbResp, name string) {
-
+	log.Debug("len(z.LoadBalancingRequestsAdaptiveGroups) = ", len(z.LoadBalancingRequestsAdaptiveGroups))
 	for _, g := range z.LoadBalancingRequestsAdaptiveGroups {
 		poolRequestsTotal.With(
 			prometheus.Labels{
@@ -616,7 +615,7 @@ func addLoadBalancingRequestsAdaptiveGroups(z *lbResp, name string) {
 }
 
 func addLoadBalancingRequestsAdaptive(z *lbResp, name string) {
-
+	log.Debug("len(z.LoadBalancingRequestsAdaptive) = ", len(z.LoadBalancingRequestsAdaptive))
 	for _, g := range z.LoadBalancingRequestsAdaptive {
 		for _, p := range g.Pools {
 			poolHealthStatus.With(
 				prometheus.Labels{
@@ -627,5 +626,4 @@ func addLoadBalancingRequestsAdaptive(z *lbResp, name string) {
 				}).Set(float64(p.Healthy))
 		}
 	}
-
 }
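Reviewer note (not part of the diff): below is a minimal standalone sketch of the windowing pattern this change introduces, assuming each job keeps its own LastSuccessfulTime. The names truncatedNow, fetchWindow, scrape and scrapeDelay are hypothetical stand-ins for getTruncatedNow, the fetch*Totals calls and cfgScrapeDelay; the point is only that the window start advances exclusively after a successful fetch, so a failed tick is retried with a wider [mintime, maxtime) range on the next tick instead of silently dropping that minute.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// truncatedNow plays the role of getTruncatedNow: the window's upper bound,
// shifted back by the scrape delay and aligned down to the minute, in UTC.
func truncatedNow(scrapeDelay time.Duration) time.Time {
	return time.Now().Add(-scrapeDelay).Truncate(time.Minute).UTC()
}

// fetchWindow stands in for any of the fetch*Totals calls: it "queries"
// the half-open interval [last, now) and reports success or failure.
func fetchWindow(last, now time.Time, fail bool) error {
	if fail {
		return errors.New("simulated GraphQL error")
	}
	fmt.Printf("scraped window %s -> %s\n", last.Format(time.RFC3339), now.Format(time.RFC3339))
	return nil
}

// scrape advances *last only after a successful fetch, so a failed tick is
// retried with a wider window on the next tick instead of being dropped.
func scrape(last *time.Time, scrapeDelay time.Duration, fail bool) {
	now := truncatedNow(scrapeDelay)
	if err := fetchWindow(*last, now, fail); err != nil {
		fmt.Println("scrape failed; keeping window start at", last.Format(time.RFC3339))
		return
	}
	*last = now
}

func main() {
	// Seed one minute back, the way prepareAccountJobs / prepareExporterBatchJobs do.
	last := truncatedNow(0).Add(-time.Minute)
	scrape(&last, 0, false) // ok: one-minute window, start advances
	scrape(&last, 0, true)  // fails: start stays where it was
	scrape(&last, 0, false) // ok: window now also covers the minute the failure missed
}
```

Seeding LastSuccessfulTime at getTruncatedNow().Add(-time.Minute) makes the very first scrape cover exactly one minute, which matches the fixed now1mAgo..now window the old code always used; after that, the datetime_geq/datetime_lt filter lets a recovered scrape backfill everything since the last success.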