[chore] Fix whitespace errors (#35619)
#### Description
This PR fixes whitespace errors where two spaces are used when only one is necessary. It also fixes one typo: `reccommend` -> `recommend`.
crobert-1 authored Oct 4, 2024
1 parent 4b1e300 commit 8e400f4
Showing 31 changed files with 45 additions and 45 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/prometheus-compliance-tests.yml
@@ -56,7 +56,7 @@ jobs:
- name: Copy binary to compliance directory
# The required name of the downloaded artifact is `otelcol_0.42.0_linux_amd64`, so we place the collector contrib artifact under the same name in the bin folder to run.
# Source: https://github.com/prometheus/compliance/blob/12cbdf92abf7737531871ab7620a2de965fc5382/remote_write_sender/targets/otel.go#L8
-run: mkdir compliance/remotewrite/sender/bin && cp opentelemetry-collector-contrib/bin/otelcontribcol_linux_amd64 compliance/remotewrite/sender/bin/otelcol_0.42.0_linux_amd64
+run: mkdir compliance/remotewrite/sender/bin && cp opentelemetry-collector-contrib/bin/otelcontribcol_linux_amd64 compliance/remotewrite/sender/bin/otelcol_0.42.0_linux_amd64
- name: clean up mod file
run: go mod tidy
working-directory: compliance/remotewrite/sender
2 changes: 1 addition & 1 deletion Makefile
@@ -558,4 +558,4 @@ checks:
$(MAKE) gendistributions
$(MAKE) -j4 generate
$(MAKE) multimod-verify
-git diff --exit-code || (echo 'Some files need committing' && git status && exit 1)
+git diff --exit-code || (echo 'Some files need committing' && git status && exit 1)
20 changes: 10 additions & 10 deletions Makefile.Common
@@ -21,7 +21,7 @@ endif
# SRC_ROOT is the top of the source tree.
SRC_ROOT := $(shell git rev-parse --show-toplevel)
# SRC_PARENT_DIR is the absolute path of source tree's parent directory
-SRC_PARENT_DIR := $(shell dirname $(SRC_ROOT))
+SRC_PARENT_DIR := $(shell dirname $(SRC_ROOT))

# build tags required by any component should be defined as independent variables and later added to GO_BUILD_TAGS below
GO_BUILD_TAGS=""
@@ -89,7 +89,7 @@ ALL_SRC := $(shell find $(ALL_PKG_DIRS) -name '*.go' \
-not -path '*/local/*' \
-type f | sort)

-ALL_SRC_AND_SHELL := find . -type f \( -iname '*.go' -o -iname "*.sh" \) ! -path '**/third_party/*' | sort
+ALL_SRC_AND_SHELL := find . -type f \( -iname '*.go' -o -iname "*.sh" \) ! -path '**/third_party/*' | sort

# All source code and documents. Used in spell check.
ALL_SRC_AND_DOC_CMD := find $(ALL_PKG_DIRS) -name "*.md" -o -name "*.go" -o -name "*.yaml" -not -path '*/third_party/*' -type f | sort
@@ -137,24 +137,24 @@ test-with-cover: $(GOTESTSUM)
.PHONY: do-unit-tests-with-cover
do-unit-tests-with-cover: $(GOTESTSUM)
@echo "running $(GOCMD) unit test ./... + coverage in `pwd`"
-$(GOTESTSUM) $(GOTESTSUM_OPT) --packages="./..." -- $(GOTEST_OPT_WITH_COVERAGE)
+$(GOTESTSUM) $(GOTESTSUM_OPT) --packages="./..." -- $(GOTEST_OPT_WITH_COVERAGE)
$(GOCMD) tool cover -html=coverage.txt -o coverage.html

.PHONY: mod-integration-test
mod-integration-test: $(GOTESTSUM)
@echo "running $(GOCMD) integration test ./... in `pwd`"
$(GOTESTSUM) $(GOTESTSUM_OPT) --packages="./..." -- $(GOTEST_OPT_WITH_INTEGRATION)
@if [ -e integration-coverage.txt ]; then \
-$(GOCMD) tool cover -html=integration-coverage.txt -o integration-coverage.html; \
-fi
+$(GOCMD) tool cover -html=integration-coverage.txt -o integration-coverage.html; \
+fi

.PHONY: do-integration-tests-with-cover
do-integration-tests-with-cover: $(GOTESTSUM)
@echo "running $(GOCMD) integration test ./... + coverage in `pwd`"
-$(GOTESTSUM) $(GOTESTSUM_OPT) --packages="./..." -- $(GOTEST_OPT_WITH_INTEGRATION_COVERAGE)
+$(GOTESTSUM) $(GOTESTSUM_OPT) --packages="./..." -- $(GOTEST_OPT_WITH_INTEGRATION_COVERAGE)
@if [ -e integration-coverage.txt ]; then \
-$(GOCMD) tool cover -html=integration-coverage.txt -o integration-coverage.html; \
-fi
+$(GOCMD) tool cover -html=integration-coverage.txt -o integration-coverage.html; \
+fi

.PHONY: benchmark
benchmark: $(GOTESTSUM)
@@ -195,8 +195,8 @@ checklinks:

.PHONY: fmt
fmt: $(GOIMPORTS)
-gofmt -w -s ./
-$(GOIMPORTS) -w -local github.com/open-telemetry/opentelemetry-collector-contrib ./
+gofmt -w -s ./
+$(GOIMPORTS) -w -local github.com/open-telemetry/opentelemetry-collector-contrib ./

.PHONY: lint
lint: $(LINT) checklicense misspell
2 changes: 1 addition & 1 deletion confmap/provider/s3provider/README.md
@@ -1,5 +1,5 @@
## Summary
-This package provides a `ConfigMapProvider` implementation for Amazon S3 (`s3provider`) that allows the Collector to load configuration by fetching and reading config objects stored in Amazon S3.
+This package provides a `ConfigMapProvider` implementation for Amazon S3 (`s3provider`) that allows the Collector to load configuration by fetching and reading config objects stored in Amazon S3.
## How it works
- It will be called by `ConfigMapResolver` to load configuration for the Collector.
- By giving a config URI starting with prefix `s3://`, this `s3provider` will be used to download config objects from the given S3 URIs, and then use the downloaded configuration during Collector initialization.
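For illustration, a minimal sketch of using the provider: point the Collector's config flag at an S3 URI, and the fetched object is an ordinary Collector configuration. The `s3://<bucket>.s3.<region>.amazonaws.com/<key>` URI shape is assumed from the prefix convention above; the bucket, region, and object key are hypothetical:

```yaml
# Hypothetical invocation (bucket, region, and object key are made up):
#   ./otelcol-contrib --config="s3://my-bucket.s3.us-east-1.amazonaws.com/collector-config.yaml"
# The object downloaded from S3 is a normal Collector configuration, for example:
receivers:
  otlp:
    protocols:
      grpc:
exporters:
  debug:
service:
  pipelines:
    traces:
      receivers: [otlp]
      exporters: [debug]
```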
@@ -41,7 +41,7 @@ func createTracesAndSpan() (ptrace.Traces, ptrace.Span) {
attrs.PutInt("attr2", 40)
attrs.PutDouble("attr3", 3.14)

-// add a span
+// add a span
spans := rs.ScopeSpans().AppendEmpty().Spans()
spans.EnsureCapacity(1)
span := spans.AppendEmpty()
2 changes: 1 addition & 1 deletion exporter/awsemfexporter/README.md
@@ -43,7 +43,7 @@ The following exporter configuration parameters are supported.
| `resource_to_telemetry_conversion` | "resource_to_telemetry_conversion" is the option for converting resource attributes to telemetry attributes. It has only one config option: `enabled`. For metrics, if `enabled=true`, all the resource attributes will be converted to metric labels by default. See `Resource Attributes to Metric Labels` section below for examples. | `enabled=false` |
| `output_destination` | "output_destination" is an option to specify the EMFExporter output. Currently, two options are available: "cloudwatch" or "stdout". | `cloudwatch` |
| `detailed_metrics` | Retain detailed datapoint values in exported metrics (e.g. instead of exporting a quantile as a statistical value, preserve the quantile's population) | `false` |
-| `parse_json_encoded_attr_values` | List of attribute keys whose corresponding values are JSON-encoded strings and will be converted to JSON structures in emf logs. For example, the attribute string value "{\\"x\\":5,\\"y\\":6}" will be converted to a json object: ```{"x": 5, "y": 6}``` | [ ] |
+| `parse_json_encoded_attr_values` | List of attribute keys whose corresponding values are JSON-encoded strings and will be converted to JSON structures in emf logs. For example, the attribute string value "{\\"x\\":5,\\"y\\":6}" will be converted to a json object: ```{"x": 5, "y": 6}``` | [ ] |
| [`metric_declarations`](#metric_declaration) | List of rules for filtering exported metrics and their dimensions. | [ ] |
| [`metric_descriptors`](#metric_descriptor) | List of rules for inserting or updating metric descriptors. | [ ] |
| `retain_initial_value_of_delta_metric` | This option specifies how the first value of a metric is handled. AWS EMF expects metric values to only contain deltas to the previous value. In the default case the first received value is therefore not sent to AWS but only used as a baseline for follow-up changes to this metric. This is fine for high-throughput metrics with stable labels (e.g. `requests{code=200}`), where it does not matter if the first value is discarded. However, when your metric describes infrequent events or events with high label cardinality, the exporter in its default configuration would still drop the first occurrence of this metric. With this configuration value set to `true`, the first value of all metrics will instead be sent to AWS. | false |
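Taken together, a minimal sketch of an `awsemf` exporter configuration using the options documented above might look as follows (assuming the documented option names map directly to YAML keys; the attribute key `payload` is hypothetical):

```yaml
exporters:
  awsemf:
    output_destination: cloudwatch        # or "stdout"
    detailed_metrics: false
    # values of the listed attribute keys are JSON-encoded strings
    # that will be expanded into JSON structures in EMF logs
    parse_json_encoded_attr_values: [payload]
    retain_initial_value_of_delta_metric: false
```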
2 changes: 1 addition & 1 deletion exporter/azuremonitorexporter/trace_to_envelope_test.go
@@ -105,7 +105,7 @@ var (
// - a specific SpanStatus as opposed to none
// - an error http.status_code
// - http.route is specified which should replace Span name as part of the RequestData name
-// - no http.client_ip or net.peer.ip specified which causes data.Source to be empty
+// - no http.client_ip or net.peer.ip specified which causes data.Source to be empty
// - adds a few different types of attributes
func TestHTTPServerSpanToRequestDataAttributeSet1(t *testing.T) {
span := getDefaultHTTPServerSpan()
2 changes: 1 addition & 1 deletion exporter/clickhouseexporter/internal/metrics_model.go
@@ -43,7 +43,7 @@ type MetricsModel interface {
insert(ctx context.Context, db *sql.DB) error
}

-// MetricsMetaData contains specific metric data
+// MetricsMetaData contains specific metric data
type MetricsMetaData struct {
ResAttr map[string]string
ResURL string
2 changes: 1 addition & 1 deletion exporter/elasticsearchexporter/config.go
@@ -152,7 +152,7 @@ type DiscoverySettings struct {
Interval time.Duration `mapstructure:"interval"`
}

-// FlushSettings defines settings for configuring the write buffer flushing
+// FlushSettings defines settings for configuring the write buffer flushing
// policy in the Elasticsearch exporter. The exporter sends a bulk request with
// all events already serialized into the send-buffer.
type FlushSettings struct {
4 changes: 2 additions & 2 deletions exporter/googlecloudexporter/README.md
@@ -136,7 +136,7 @@ These instructions are to get you up and running quickly with the GCP exporter i
section](#prerequisite-authenticating) above.


-4. **Run the collector.** The following runs the collector in the foreground, so please execute it in a separate terminal.
+4. **Run the collector.** The following runs the collector in the foreground, so please execute it in a separate terminal.

```sh
./otelcol-contrib --config=config.yaml
@@ -435,7 +435,7 @@ By default, the exporter sends telemetry to the project specified by `project` i
The `gcp.project.id` label can be combined with the `destination_project_quota` option to attribute quota usage to the project parsed by the label. This feature is currently only available
for traces and metrics. The Collector's default service account will need `roles/serviceusage.serviceUsageConsumer` IAM permissions in the destination quota project.

-Note that this option will not work if a quota project is already defined in your Collector's GCP credentials. In this case, the telemetry will fail to export with a "project not found" error.
+Note that this option will not work if a quota project is already defined in your Collector's GCP credentials. In this case, the telemetry will fail to export with a "project not found" error.
You can remove a predefined quota project by manually editing your [ADC file](https://cloud.google.com/docs/authentication/application-default-credentials#personal) (if it exists) to delete the `quota_project_id` entry line.
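As a sketch, combining the `project` setting with this option might look as follows (option names are taken from the text above; the project ID is hypothetical):

```yaml
exporters:
  googlecloud:
    # hypothetical destination project
    project: my-observability-project
    # attribute quota usage to the project parsed from the gcp.project.id label
    destination_project_quota: true
```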

## Features and Feature-Gates
2 changes: 1 addition & 1 deletion exporter/googlecloudpubsubexporter/config.go
@@ -38,7 +38,7 @@ type Config struct {

// WatermarkConfig customizes the behavior of the watermark
type WatermarkConfig struct {
-// Behavior of the watermark. Currently, only of the message (none, earliest and current, current being the default)
+// Behavior of the watermark. Currently, only of the message (none, earliest and current, current being the default)
// will set the timestamp on pubsub based on timestamps of the events inside the message
Behavior string `mapstructure:"behavior"`
// Indication on how much the timestamp can drift from the current time, the timestamp will be capped to the allowed
2 changes: 1 addition & 1 deletion exporter/logzioexporter/config.go
@@ -36,7 +36,7 @@ func (c *Config) Validate() error {
// checkAndWarnDeprecatedOptions checks for soon-to-be-deprecated configuration options (queue_max_length, queue_capacity, drain_interval, custom_endpoint), logs a warning message, and maps them to the relevant updated option
func (c *Config) checkAndWarnDeprecatedOptions(logger hclog.Logger) {
if c.QueueCapacity != 0 {
logger.Warn("You are using the deprecated`queue_capacity` option that will be removed in the next release; use exporter helper configuration instead: https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md")
logger.Warn("You are using the deprecated `queue_capacity` option that will be removed in the next release; use exporter helper configuration instead: https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md")
}
// Warn and map queue_max_length -> QueueSettings.QueueSize
if c.QueueMaxLength != 0 {
8 changes: 4 additions & 4 deletions exporter/prometheusremotewriteexporter/DESIGN.md
@@ -45,10 +45,10 @@ Because of the gaps mentioned above, this project will convert from the current

## **2. Prometheus Remote Write/Cortex Exporter**

-The Prometheus remote write/Cortex exporter should receive OTLP metrics, group data points by metric name and label set, convert each group to a TimeSeries, and send all TimeSeries to a storage backend via HTTP.
+The Prometheus remote write/Cortex exporter should receive OTLP metrics, group data points by metric name and label set, convert each group to a TimeSeries, and send all TimeSeries to a storage backend via HTTP.

### **2.1 Receiving Metrics**
-The Prometheus remote write/Cortex exporter receives a MetricsData instance in its PushMetrics() function. MetricsData contains a collection of Metric instances. Each Metric instance contains a series of data points, and each data point has a set of labels associated with it. Since Prometheus remote write TimeSeries are identified by unique sets of labels, the exporter needs to group data points within each Metric instance by their label set, and convert each group to a TimeSeries.
+The Prometheus remote write/Cortex exporter receives a MetricsData instance in its PushMetrics() function. MetricsData contains a collection of Metric instances. Each Metric instance contains a series of data points, and each data point has a set of labels associated with it. Since Prometheus remote write TimeSeries are identified by unique sets of labels, the exporter needs to group data points within each Metric instance by their label set, and convert each group to a TimeSeries.

To group data points by label set, the exporter should create a map with each PushMetrics() call. The key of the map should represent a combination of the following information:

@@ -67,7 +67,7 @@ The value of the map should be Prometheus TimeSeries, and each data point's va

Pseudocode:

-func PushMetrics(metricsData) {
+func PushMetrics(metricsData) {

// Create a map that stores distinct TimeSeries
map := make(map[String][]TimeSeries)
@@ -81,7 +81,7 @@ Pseudocode:
// Add to TimeSeries

// Sends TimeSeries to backend
-export(map)
+export(map)
}

### **2.2 Mapping of OTLP Metrics to TimeSeries**
2 changes: 1 addition & 1 deletion exporter/signalfxexporter/config.go
@@ -131,7 +131,7 @@ type Config struct {
// to be used in a dimension key.
NonAlphanumericDimensionChars string `mapstructure:"nonalphanumeric_dimension_chars"`

-// Whether to drop histogram bucket metrics dispatched to Splunk Observability.
+// Whether to drop histogram bucket metrics dispatched to Splunk Observability.
// Default value is set to false.
DropHistogramBuckets bool `mapstructure:"drop_histogram_buckets"`
