diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index ee1ff064a7..9806375b86 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -3,3 +3,4 @@
# For more info, see https://help.github.com/articles/about-codeowners/
* @elastic/obs-docs
+/.github/workflows/co-docs-builder.yml @elastic/docs-engineering
diff --git a/.github/workflows/co-docs-builder.yml b/.github/workflows/co-docs-builder.yml
new file mode 100644
index 0000000000..e37cdf4aca
--- /dev/null
+++ b/.github/workflows/co-docs-builder.yml
@@ -0,0 +1,31 @@
+name: Elastic docs
+
+on:
+ pull_request_target:
+ # The paths property can be omitted entirely if the repo is mainly used for docs. Leaving it in can cause PRs in repos that
+ # have branch protection checks in place to lose the ability to merge, because the workflow does not start when no matching paths change. If this property
+ # is included, please ensure that branch protection checks are disabled for the repo.
+ paths:
+ # Preface with your docs dir if you need further specificity (optional)
+ - 'docs/en/serverless/**.mdx'
+ - 'docs/en/serverless/**.docnav.json'
+ - 'docs/en/serverless/**.docapi.json'
+ - 'docs/en/serverless/**.devdocs.json'
+ - 'docs/en/serverless/**.jpg'
+ - 'docs/en/serverless/**.jpeg'
+ - 'docs/en/serverless/**.svg'
+ - 'docs/en/serverless/**.png'
+ - 'docs/en/serverless/**.gif'
+ types: [closed, opened, synchronize, labeled]
+
+jobs:
+ publish:
+ if: contains(github.event.pull_request.labels.*.name, 'ci:doc-build')
+ uses: elastic/workflows/.github/workflows/docs-elastic-co-publish.yml@main
+ with:
+ subdirectory: 'docs/en/serverless/'
+ secrets:
+ VERCEL_GITHUB_TOKEN: ${{ secrets.VERCEL_GITHUB_TOKEN_PUBLIC }}
+ VERCEL_TOKEN: ${{ secrets.VERCEL_TOKEN_PUBLIC }}
+ VERCEL_ORG_ID: ${{ secrets.VERCEL_ORG_ID_PUBLIC }}
+ VERCEL_PROJECT_ID_DOCS_CO: ${{ secrets.VERCEL_PROJECT_ID_DOCS_CO_PUBLIC }}
diff --git a/.github/workflows/opentelemetry.yml b/.github/workflows/opentelemetry.yml
deleted file mode 100644
index 84a6209ff2..0000000000
--- a/.github/workflows/opentelemetry.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-# Look up results at https://ela.st/oblt-ci-cd-stats.
-# There will be one service per GitHub repository, including the org name, and one Transaction per Workflow.
-name: OpenTelemetry Export Trace
-
-on:
- workflow_run:
- workflows: [ "*" ]
- types: [completed]
-
-permissions:
- contents: read
-
-jobs:
- otel-export-trace:
- runs-on: ubuntu-latest
- steps:
- - uses: elastic/apm-pipeline-library/.github/actions/opentelemetry@current
- with:
- vaultUrl: ${{ secrets.VAULT_ADDR }}
- vaultRoleId: ${{ secrets.VAULT_ROLE_ID }}
- vaultSecretId: ${{ secrets.VAULT_SECRET_ID }}
diff --git a/README.md b/README.md
index 908e5cc331..87f86630c6 100644
--- a/README.md
+++ b/README.md
@@ -8,32 +8,70 @@ Within this repo, the `/docs/en/` directory is structured as follows:
| Directory | Description |
| --------------------- | ----------- |
-| __integrations__ | Contains the source files for the [Integrations Developer Guide](https://www.elastic.co/guide/en/integrations-developer/current/index.html)
+| __integrations__ | Contains the source files for the [Integrations Developer Guide](https://www.elastic.co/guide/en/integrations-developer/current/index.html).
| __observability__ | Contains the source files for the [Observability Guide](https://www.elastic.co/guide/en/observability/current/index.html), which includes content for APM, Logs, Metrics, Synthetics, User experience, and Uptime.|
+| __serverless__ | Contains the source files for the [Elastic Observability Serverless docs](https://docs.elastic.co/serverless/observability/what-is-observability-serverless).
| __shared__ | Contains the source files for shared Observability content.|
| __templates__ | Contains content templates.|
-## Reviews
-
-All documentation pull requests automatically add the **[@obs-docs](https://github.com/orgs/elastic/teams/obs-docs)** team as a reviewer.
+## Contributing
-## Backporting
+If you find a bug in our documentation or want to request an enhancement, you can open an issue using our template. We also welcome contributions in the form of PRs. Before you submit a PR, make sure that you have signed our [Contributor License Agreement](https://www.elastic.co/contributor-agreement/).
-Pull requests should be tagged with the target version of the Elastic Stack along with any relevant backport labels. In general, we only backport documentation changes to [live stack versions](https://github.com/elastic/docs/blob/master/conf.yaml#L74). For manual backports, we recommend using the [backport tool](https://github.com/sqren/backport) to easily open backport PRs. If you need help, ping **[@obs-docs](https://github.com/orgs/elastic/teams/obs-docs)** and we'd be happy to handle the backport process for you.
+Contributing directly to the docs works differently across the doc sets in this repo.
+### Observability Guide
-## Build
+The source files for the Observability Guide are written in [AsciiDoc](https://docs.asciidoctor.org/asciidoc/latest/) and are built using [elastic/docs](https://github.com/elastic/docs).
-To build the docs:
+To build the docs locally:
1. Check out the `elastic/docs` repository, along with any repositories that contain source files.
-
2. Run the `build_docs` script, passing in the path to the `index.asciidoc` and resource paths to other repos that contain source files. For example, to build the Observability Guide and open it in the browser, run:
+ ```
+ ../docs/build_docs --doc ./docs/en/observability/index.asciidoc --chunk 3 --resource ../apm-server --resource ../ingest-docs/docs --open
+ ```
-```
-../docs/build_docs --doc ./docs/en/observability/index.asciidoc --chunk 3 --resource ../apm-server --resource ../ingest-docs/docs --open
-```
+The above command assumes that this repo, [elastic/docs](https://github.com/elastic/docs), [elastic/ingest-docs](https://github.com/elastic/ingest-docs), and [elastic/apm-server](https://github.com/elastic/apm-server) are checked out into the same parent directory.
+
+If you prefer to use aliases, you can load the [elastic/docs/doc_build_aliases.sh file](https://github.com/elastic/docs/blob/master/doc_build_aliases.sh), which has the resources defined for you.
+
+### Elastic Observability Serverless docs
+
+The Elastic Observability Serverless docs use a custom syntax written in [MDX](https://mdxjs.com/). In many cases, you only need to know plain Markdown to contribute. We'll add a public component reference and additional contribution guidelines in the future. Elasticians can refer to our [internal syntax reference](https://docs.elastic.dev/docsmobile/syntax).
+
+### Integrations Developer Guide
+
+The source files for the Integrations Developer Guide are written in [AsciiDoc](https://docs.asciidoctor.org/asciidoc/latest/) and are built using [elastic/docs](https://github.com/elastic/docs).
+
+To build the docs locally:
+
+1. Check out the `elastic/docs` repository, along with any repositories that contain source files.
+2. Run the `build_docs` script, passing in the path to the `index.asciidoc` and resource paths to other repos that contain source files. For example, to build the Integrations Developer Guide and open it in the browser, run:
+ ```
+ ../docs/build_docs --doc ./docs/en/integrations/index.asciidoc --resource=../package-spec/versions --chunk 1 --open
+ ```
-The above command assumes that [elastic/docs](https://github.com/elastic/docs), [elastic/ingest-docs](https://github.com/elastic/ingest-docs), and [elastic/apm-server](https://github.com/elastic/apm-server) are checked out into the same parent directory.
+The above command assumes that this repo, [elastic/docs](https://github.com/elastic/docs), and [elastic/package-spec](https://github.com/elastic/package-spec) are checked out into the same parent directory.
If you prefer to use aliases, you can load the [elastic/docs/doc_build_aliases.sh file](https://github.com/elastic/docs/blob/master/doc_build_aliases.sh), which has the resources defined for you.
+
+## Backporting
+
+Backporting works differently across the doc sets in this repo.
+
+### Observability Guide
+
+Pull requests should be tagged with the target version of the Elastic Stack along with any relevant backport labels. In general, we only backport documentation changes to [live stack versions](https://github.com/elastic/docs/blob/master/conf.yaml#L74). For manual backports, we recommend using the [backport tool](https://github.com/sqren/backport) to easily open backport PRs. If you need help, ping **[@obs-docs](https://github.com/orgs/elastic/teams/obs-docs)** and we'd be happy to handle the backport process for you.
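+
+For example, a manual backport of a merged PR to a release branch looks something like this (the PR number and target branch are placeholders, and this assumes you have the backport tool installed and configured for this repo):
+
+```
+npx backport --pr 1234 --branch 8.x
+```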
+
+### Elastic Observability Serverless docs
+
+Serverless docs are not versioned, and should never be backported. All changes should be made to the `main` branch.
+
+### Integrations Developer Guide
+
+The Integrations Developer Guide is not versioned, and should never be backported. All changes should be made to the `main` branch.
+
+## Reviews
+
+All documentation pull requests automatically add the **[@obs-docs](https://github.com/orgs/elastic/teams/obs-docs)** team as a reviewer.
diff --git a/docs/en/observability/apm/data-model.asciidoc b/docs/en/observability/apm/data-model.asciidoc
index 7d049cfad2..03493aa366 100644
--- a/docs/en/observability/apm/data-model.asciidoc
+++ b/docs/en/observability/apm/data-model.asciidoc
@@ -357,9 +357,9 @@ You can filter and group by these dimensions (some of which are optional, for ex
* `host.name`: The user-defined name of the host or the detected hostname of the service that served the transaction
* `host.os.platform`: The platform name of the service that served the transaction, for example `linux`
* `kubernetes.pod.name`: The name of the Kubernetes pod running the service that served the transaction
-* `labels`: Key-value object containing string labels set globally by the APM agents.
+* `labels`: Key-value object containing string labels set globally by the APM agents. This dimension is not present for RUM agents.
* `metricset.interval`: A string with the aggregation interval the metricset represents.
-* `numeric_labels`: Key-value object containing numeric labels set globally by the APM agents.
+* `numeric_labels`: Key-value object containing numeric labels set globally by the APM agents. This dimension is not present for RUM agents.
* `service.environment`: The environment of the service that served the transaction
* `service.language.name`: The language name of the service that served the transaction, for example `Go`
* `service.language.version`: The language version of the service that served the transaction
@@ -411,9 +411,9 @@ These metric documents can be identified by searching for `metricset.name: servi
You can filter and group by these dimensions:
* `agent.name`: The name of the {apm-agent} that instrumented the operation, for example `java`
-* `labels`: Key-value object containing string labels set globally by the APM agents.
+* `labels`: Key-value object containing string labels set globally by the APM agents. This dimension is not present for RUM agents.
* `metricset.interval`: A string with the aggregation interval the metricset represents.
-* `numeric_labels`: Key-value object containing numeric labels set globally by the APM agents.
+* `numeric_labels`: Key-value object containing numeric labels set globally by the APM agents. This dimension is not present for RUM agents.
* `service.environment`: The environment of the service that made the request
* `service.language.name`: The language name of the service that served the transaction, for example `Go`
* `service.name`: The name of the service that made the request
@@ -456,9 +456,9 @@ You can filter and group by these dimensions:
* `agent.name`: The name of the {apm-agent} that instrumented the operation, for example `java`
* `event.outcome`: The outcome of the operation, for example `success`
-* `labels`: Key-value object containing string labels set globally by the APM agents.
+* `labels`: Key-value object containing string labels set globally by the APM agents. This dimension is not present for RUM agents.
* `metricset.interval`: A string with the aggregation interval the metricset represents.
-* `numeric_labels`: Key-value object containing numeric labels set globally by the APM agents.
+* `numeric_labels`: Key-value object containing numeric labels set globally by the APM agents. This dimension is not present for RUM agents.
* `service.environment`: The environment of the service that made the request
* `service.language.name`: The language name of the service that served the transaction, for example `Go`
* `service.name`: The name of the service that made the request
@@ -496,9 +496,9 @@ These metric documents can be identified by searching for `metricset.name: servi
You can filter and group by these dimensions:
* `agent.name`: The name of the {apm-agent} that instrumented the operation, for example `java`
-* `labels`: Key-value object containing string labels set globally by the APM agents.
+* `labels`: Key-value object containing string labels set globally by the APM agents. This dimension is not present for RUM agents.
* `metricset.interval`: A string with the aggregation interval the metricset represents.
-* `numeric_labels`: Key-value object containing numeric labels set globally by the APM agents.
+* `numeric_labels`: Key-value object containing numeric labels set globally by the APM agents. This dimension is not present for RUM agents.
* `service.environment`: The environment of the service that made the request
* `service.language.name`: The language name of the service that served the transaction, for example `Go`
* `service.name`: The name of the service that made the request
@@ -574,7 +574,9 @@ Once this limit is reached, any new combinations of `transaction.name`, `transac
This issue can be resolved by increasing memory available to APM Server, or by ensuring that the dimensions do not use values
that are based on parameters that can change. For example, user ids, product ids, order numbers, query parameters, etc.,
-should be stripped away from the dimensions.
+should be stripped away from the dimensions. For the same reason, avoid high-cardinality global labels (`labels.*` and `numeric_labels.*`).
+
+To protect APM Server from using too much memory, aggregated metrics do not consider global labels from RUM agents.
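+
+For example, agents that support the `global_labels` configuration option typically accept a comma-separated list of key-value pairs, often through an environment variable. The variable and values below are illustrative only; check your agent's configuration reference for the exact option name:
+
+[source,sh]
+----
+# Low-cardinality labels such as team or region are safe to aggregate on;
+# avoid values that change per request, such as order or user IDs.
+export ELASTIC_APM_GLOBAL_LABELS="team=checkout,region=us-east-1"
+----
+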
// This heading is linked to from the APM UI section in Kibana
[[apm-data-model-metadata]]
diff --git a/docs/en/observability/cloud-monitoring/aws/images/firehose-waf-logs.png b/docs/en/observability/cloud-monitoring/aws/images/firehose-waf-logs.png
new file mode 100644
index 0000000000..0e05f6fa55
Binary files /dev/null and b/docs/en/observability/cloud-monitoring/aws/images/firehose-waf-logs.png differ
diff --git a/docs/en/observability/cloud-monitoring/aws/monitor-amazon-intro.asciidoc b/docs/en/observability/cloud-monitoring/aws/monitor-amazon-intro.asciidoc
index f0770b9efa..928b86c0bb 100644
--- a/docs/en/observability/cloud-monitoring/aws/monitor-amazon-intro.asciidoc
+++ b/docs/en/observability/cloud-monitoring/aws/monitor-amazon-intro.asciidoc
@@ -38,6 +38,8 @@ include::monitor-aws-vpc-flow-logs.asciidoc[leveloffset=+2]
include::monitor-aws-cloudtrail-firehose.asciidoc[leveloffset=+2]
+include::monitor-aws-waf-firehose.asciidoc[leveloffset=+2]
+
include::monitor-aws-cloudwatch-firehose.asciidoc[leveloffset=+2]
include::monitor-aws-firehose-troubleshooting.asciidoc[leveloffset=+2]
diff --git a/docs/en/observability/cloud-monitoring/aws/monitor-aws-waf-firehose.asciidoc b/docs/en/observability/cloud-monitoring/aws/monitor-aws-waf-firehose.asciidoc
new file mode 100644
index 0000000000..8433de4701
--- /dev/null
+++ b/docs/en/observability/cloud-monitoring/aws/monitor-aws-waf-firehose.asciidoc
@@ -0,0 +1,128 @@
+[[monitor-aws-waf-firehose]]
+= Monitor Web Application Firewall (WAF) logs
+
+++++
+Monitor WAF logs
+++++
+
+In this section, you'll learn how to send AWS WAF events from AWS to your {stack} using Amazon Data Firehose.
+
+You will go through the following steps:
+
+- Select a WAF-compatible resource (for example, a CloudFront distribution)
+- Create a delivery stream in Amazon Data Firehose
+- Create a web Access Control List (ACL) to generate WAF logs
+- Set up logging to forward the logs to the {stack} using a Firehose stream
+- Visualize your WAF logs in {kib}
+
+[discrete]
+[[firehose-waf-prerequisites]]
+== Before you begin
+
+We assume that you already have:
+
+- An AWS account with permissions to pull the necessary data from AWS.
+- A deployment using our hosted {ess} on {ess-trial}[{ecloud}]. The deployment includes an {es} cluster for storing and searching your data, and {kib} for visualizing and managing your data. Amazon Data Firehose works with {stack} version 7.17 or later, running on {ecloud} only.
+
+IMPORTANT: Make sure the deployment is on AWS, because the Amazon Data Firehose delivery stream connects specifically to an endpoint that needs to be on AWS.
+
+[discrete]
+[[firehose-waf-step-one]]
+== Step 1: Install the AWS integration in {kib}
+
+. In {kib}, navigate to *Management* > *Integrations* and browse the catalog to find the AWS integration.
+
+. Navigate to the *Settings* tab and click *Install AWS assets*.
+
+[discrete]
+[[firehose-waf-step-two]]
+== Step 2: Create a delivery stream in Amazon Data Firehose
+
+. Go to the https://console.aws.amazon.com/[AWS console] and navigate to Amazon Data Firehose.
+
+. Click *Create Firehose stream* and choose the source and destination of your Firehose stream. Unless you are streaming data from Kinesis Data Streams, set source to `Direct PUT` and destination to `Elastic`.
+
+. Provide a meaningful *Firehose stream name* that will allow you to identify this delivery stream later. Your stream name must start with the prefix `aws-waf-logs-`, or it will not appear as an option when you configure WAF logging later.
+
+NOTE: For advanced use cases, source records can be transformed by invoking a custom Lambda function. When using Elastic integrations, this should not be required.
+
+[discrete]
+[[firehose-waf-step-three]]
+== Step 3: Specify the destination settings for your Firehose stream
+
+. From the *Destination settings* panel, specify the following settings:
++
+* *Elastic endpoint URL*: Enter the Elastic endpoint URL of your {es} cluster. To find the {es} endpoint, go to the Elastic Cloud console, navigate to the Integrations page, and select *Connection details*. For example: `https://my-deployment.es.us-east-1.aws.elastic-cloud.com`.
++
+* *API key*: Enter the encoded Elastic API key. To create an API key, go to the Elastic Cloud console, navigate to the Integrations page, select *Connection details*, and click *Create and manage API keys*. If you are using an API key with *Restrict privileges*, review the Indices privileges to grant at least `auto_configure` and `write` privileges for the indices you will be using with this delivery stream (see the example request after this list).
++
+* *Content encoding*: For better network efficiency, leave content encoding set to GZIP.
++
+* *Retry duration*: Determines how long Firehose continues retrying the request in the event of an error. A duration of 60-300s should be suitable for most use cases.
++
+* *es_datastream_name*: `logs-aws.waf-default`
+
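+If you need a restricted API key for this stream, a request like the following grants the minimum indices privileges mentioned above. The key name is an example and the index pattern assumes the default data stream name from this list; adjust both to your setup:
+
+[source,console]
+----
+POST /_security/api_key
+{
+  "name": "firehose-waf-example-key",
+  "role_descriptors": {
+    "firehose_writer": {
+      "indices": [
+        {
+          "names": ["logs-aws.waf-default"],
+          "privileges": ["auto_configure", "write"]
+        }
+      ]
+    }
+  }
+}
+----
+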
+[discrete]
+[[firehose-waf-step-four]]
+== Step 4: Create a web access control list
+
+To create a new web access control list (ACL), follow these steps:
+
+. Go to the https://console.aws.amazon.com/[AWS console] and navigate to the *WAF & Shield* page.
+
+. Describe the web ACL by entering the resource type, region, and name.
+
+. Associate it with an AWS resource. If you don't have an existing resource, you can create and attach a web ACL to any of the following AWS resource types:
++
+- CloudFront distribution
+- Application Load Balancers
+- Amazon API Gateway REST APIs
+- Amazon App Runner services
+- AWS AppSync GraphQL APIs
+- Amazon Cognito user pools
+- AWS Verified Access Instances
+
+. Add one or two rules to the *Free rule groups* list from the AWS managed rule groups. Keep all other settings at their default values.
+
+. Set the rule priority by keeping the default values.
+
+. Configure metrics by keeping the default values.
+
+. Review and create the web ACL.
+
+[discrete]
+[[firehose-waf-step-five]]
+== Step 5: Set up logging
+
+. Go to the web ACL you created in the previous step.
+
+. Open the *Logging and metrics* section and edit the following settings:
++
+- *Logging destination*: select "Amazon Data Firehose stream"
+- *Amazon Data Firehose stream*: select the Firehose stream you created in step 2.
+
+WAF creates the required Identity and Access Management (IAM) role.
+If your Firehose stream name doesn't appear in the list, make sure the name you chose for the stream starts with `aws-waf-logs-`, as prescribed by AWS naming conventions.
+
+[discrete]
+[[firehose-waf-step-six]]
+== Step 6: Visualize your WAF logs in {kib}
+
+You can now log in to your {stack} deployment to check whether the WAF logs are flowing. To generate logs, you can use cURL to send HTTP requests to your testing CloudFront distribution.
+
+[source,console]
+----
+curl -i https://<your-distribution-id>.cloudfront.net
+----
+
+To maintain a steady flow of logs, you can use `watch -n 5` to repeat the command every 5 seconds.
+
+[source,console]
+----
+watch -n 5 curl -i https://<your-distribution-id>.cloudfront.net
+----
+
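+To confirm that documents are arriving, you can run a quick count query from the {kib} *Developer Tools* console. This is a minimal check that assumes the default data stream name configured in step 3:
+
+[source,console]
+----
+GET logs-aws.waf-default/_count
+----
+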
+Navigate to {kib} to visualize the first WAF logs in your {stack}.
+
+[role="screenshot"]
+image::firehose-waf-logs.png[Firehose WAF logs in Kibana]
diff --git a/docs/en/observability/images/hosts.png b/docs/en/observability/images/hosts.png
index 31732906fc..479ba32124 100644
Binary files a/docs/en/observability/images/hosts.png and b/docs/en/observability/images/hosts.png differ
diff --git a/docs/en/observability/images/log-threshold-breach.png b/docs/en/observability/images/log-threshold-breach.png
new file mode 100644
index 0000000000..200ddfb875
Binary files /dev/null and b/docs/en/observability/images/log-threshold-breach.png differ
diff --git a/docs/en/observability/images/metrics-overlay.png b/docs/en/observability/images/metrics-overlay.png
index c04c794b48..4d40b434d4 100644
Binary files a/docs/en/observability/images/metrics-overlay.png and b/docs/en/observability/images/metrics-overlay.png differ
diff --git a/docs/en/observability/index.asciidoc b/docs/en/observability/index.asciidoc
index dc2be31769..a2daf4fa72 100644
--- a/docs/en/observability/index.asciidoc
+++ b/docs/en/observability/index.asciidoc
@@ -172,6 +172,7 @@ include::create-alerts.asciidoc[leveloffset=+1]
include::aggregation-options.asciidoc[leveloffset=+2]
include::view-observability-alerts.asciidoc[leveloffset=+2]
include::triage-slo-burn-rate-breaches.asciidoc[leveloffset=+3]
+include::triage-threshold-breaches.asciidoc[leveloffset=+3]
//SLOs
include::slo-overview.asciidoc[leveloffset=+1]
diff --git a/docs/en/observability/monitor-infra/analyze-hosts.asciidoc b/docs/en/observability/monitor-infra/analyze-hosts.asciidoc
index d9ec1e154d..8617802465 100644
--- a/docs/en/observability/monitor-infra/analyze-hosts.asciidoc
+++ b/docs/en/observability/monitor-infra/analyze-hosts.asciidoc
@@ -35,9 +35,9 @@ averages of key metrics, including CPU usage, memory usage, and throughput.
default is 50, which means the page shows data for the top 50 hosts based on the
most recent timestamps. You can increase the host limit to see data for more
hosts, but doing so may impact query performance.
-* The Hosts table shows a breakdown of metrics for each host. You may need to
-page through the list or change the number of rows displayed on each page to see
-all of your hosts.
+* The Hosts table shows a breakdown of metrics for each host along with an alert count
+for any hosts with active alerts. You may need to page through the list
+or change the number of rows displayed on each page to see all of your hosts.
* Each host name is an active link to a <> page,
which includes metrics, host metadata, alerts, processes, logs, and anomalies.
You can optionally open the host details in an overlay.
@@ -46,6 +46,8 @@ the already returned data set.
* The tabs at the bottom of the page show an overview of the metrics, logs,
and alerts for all hosts returned by your search.
+TIP: For more information about creating and viewing alerts, refer to <>.
+
[discrete]
[[analyze-hosts-filter-view]]
== Filter the Hosts view
@@ -170,6 +172,18 @@ image::images/hosts-view-alerts.png[Screenshot showing Alerts view]
To see alerts for a specific host, refer to <>.
+*****
+**Why are alerts missing from the Hosts page?**
+
+If your rules are triggering alerts that don't appear on the **Hosts** page,
+edit the rules and make sure they are correctly configured to associate the host name with the alert:
+
+* For Metric threshold or Custom threshold rules, select `host.name` in the **Group alerts by** field.
+* For Inventory rules, select **Host** for the node type under **Conditions**.
+
+To learn more about creating and managing rules, refer to <>.
+*****
+
[discrete]
[[view-host-details]]
== View host details
diff --git a/docs/en/observability/monitor-infra/host-details-partial.asciidoc b/docs/en/observability/monitor-infra/host-details-partial.asciidoc
index fcce796c1c..bbd7025008 100644
--- a/docs/en/observability/monitor-infra/host-details-partial.asciidoc
+++ b/docs/en/observability/monitor-infra/host-details-partial.asciidoc
@@ -13,6 +13,8 @@ Change the time range to view metrics over a specific period of time.
Hover over a specific time period on a chart to compare the various metrics at that given time.
+Expand the **Alerts** section to see alerts related to the selected host.
+
====
[%collapsible]
diff --git a/docs/en/observability/triage-slo-burn-rate-breaches.asciidoc b/docs/en/observability/triage-slo-burn-rate-breaches.asciidoc
index 420b0204c6..b0748e8b62 100644
--- a/docs/en/observability/triage-slo-burn-rate-breaches.asciidoc
+++ b/docs/en/observability/triage-slo-burn-rate-breaches.asciidoc
@@ -9,8 +9,8 @@ When this happens, you are at risk of exhausting your error budget and violating
To triage issues quickly, go to the alert details page:
-. Go to **{observability}** -> **Alerts** (or open the SLO and click **Alerts**.)
-. From the Alerts table, click the image:images/icons/boxesHorizontal.svg[More actions icon] icon next to the alert and select **View alert details**.
+. Go to **{observability}** → **Alerts** (or open the SLO and click **Alerts**).
+. From the Alerts table, click the image:images/icons/boxesHorizontal.svg[More actions] icon next to the alert and select **View alert details**.
The alert details page shows information about the alert, including when the alert was triggered,
the duration of the alert, the source SLO, and the rule that triggered the alert.
@@ -37,3 +37,5 @@ After investigating the alert, you may want to:
* Click **Snooze the rule** to snooze notifications for a specific time period or indefinitely.
* Click the image:images/icons/boxesVertical.svg[Actions] icon and select **Add to case** to add the alert to a new or existing case. To learn more, refer to <>.
* Click the image:images/icons/boxesVertical.svg[Actions] icon and select **Mark as untracked**.
+When an alert is marked as untracked, actions are no longer generated.
+You can choose to move active alerts to this state when you disable or delete rules.
diff --git a/docs/en/observability/triage-threshold-breaches.asciidoc b/docs/en/observability/triage-threshold-breaches.asciidoc
new file mode 100644
index 0000000000..9cc1341065
--- /dev/null
+++ b/docs/en/observability/triage-threshold-breaches.asciidoc
@@ -0,0 +1,48 @@
+[[triage-threshold-breaches]]
+= Triage threshold breaches
+++++
+Threshold breaches
+++++
+
+Threshold breaches occur when an {observability} data type reaches or exceeds the threshold set in your <>.
+For example, you might have a custom threshold rule that triggers an alert when the total number of log documents with a log level of `error` reaches 100.
+
+To triage issues quickly, go to the alert details page:
+
+. Go to **{observability}** → **Alerts**.
+. From the Alerts table, click the image:images/icons/boxesHorizontal.svg[More actions] icon next to the alert and select **View alert details**.
+
+The alert details page shows information about the alert, including when the alert was triggered,
+the duration of the alert, and the last status update.
+If there is a "group by" field specified in the rule, the page also includes the source.
+You can follow the links to navigate to the rule definition.
+
+Explore charts on the page to learn more about the threshold breach:
+
+[role="screenshot"]
+image::images/log-threshold-breach.png[Alert details for log threshold breach]
+
+* The page includes a chart for each condition specified in the rule.
+These charts help you understand when the breach occurred and its severity.
+* If your rule is intended to detect log threshold breaches
+(that is, it has a single condition that uses a count aggregation),
+you can run a log rate analysis, assuming you have the required license.
+Running a log rate analysis is useful for detecting significant dips or spikes in the number of logs.
+Notice that you can adjust the baseline and deviation, and then run the analysis again.
+For more information about using the log rate analysis feature,
+refer to the {kibana-ref}/xpack-ml-aiops.html#log-rate-analysis[AIOps Labs] documentation.
+* The page may also include an alerts history chart that shows the number of triggered alerts per day for the last 30 days.
+This chart is currently only available for rules that specify a single condition.
+* Timelines on the page are annotated to show when the threshold was breached.
+You can hover over an alert icon to see the timestamp of the alert.
+
+Analyze these charts to better understand when the breach started, its current
+state, and how the issue is trending.
+
+After investigating the alert, you may want to:
+
+* Click **Snooze the rule** to snooze notifications for a specific time period or indefinitely.
+* Click the image:images/icons/boxesVertical.svg[Actions] icon and select **Add to case** to add the alert to a new or existing case. To learn more, refer to <>.
+* Click the image:images/icons/boxesVertical.svg[Actions] icon and select **Mark as untracked**.
+When an alert is marked as untracked, actions are no longer generated.
+You can choose to move active alerts to this state when you disable or delete rules.
diff --git a/docs/en/observability/uptime-intro.asciidoc b/docs/en/observability/uptime-intro.asciidoc
index 3c6dc6e46e..f4d8b8732e 100644
--- a/docs/en/observability/uptime-intro.asciidoc
+++ b/docs/en/observability/uptime-intro.asciidoc
@@ -13,6 +13,8 @@ infrastructure with {heartbeat} natively.
For browser-based monitors, a richer management and reporting experience,
and more capabilities such as triaging and responding to alerts, use the
<> instead of the {uptime-app}.
+
+Note that the {uptime-app} is hidden from the interface when there is no recent {heartbeat} data. To see the app, you may need to turn on the **Always show legacy Uptime app** setting (`observability:enableLegacyUptimeApp`) under {kib} Advanced Settings. To learn how, refer to {kibana-ref}/advanced-options.html[Advanced Settings].
====
The {uptime-app} uses {agent} to periodically check the status of your services and applications.
diff --git a/docs/en/observability/view-observability-alerts.asciidoc b/docs/en/observability/view-observability-alerts.asciidoc
index 86d63a6f21..9411c94fad 100644
--- a/docs/en/observability/view-observability-alerts.asciidoc
+++ b/docs/en/observability/view-observability-alerts.asciidoc
@@ -34,6 +34,8 @@ An alert is "Active" when the condition defined in the rule currently matches.
An alert has "Recovered" when that condition, which previously matched, is currently no longer matching.
An alert is "Untracked" when its corresponding rule is disabled or you mark the alert as untracked.
To mark the alert as untracked, go to the Alerts table, click the image:images/icons/boxesHorizontal.svg[More actions] icon to expand the "More actions" menu, and click *Mark as untracked*.
+When an alert is marked as untracked, actions are no longer generated.
+You can choose to move active alerts to this state when you disable or delete rules.
NOTE: There is also a "Flapping" status, which means the alert is switching repeatedly between active and recovered states.
This status is possible only if you have enabled alert flapping detection.
diff --git a/docs/en/serverless/ai-assistant/ai-assistant.mdx b/docs/en/serverless/ai-assistant/ai-assistant.mdx
new file mode 100644
index 0000000000..32a62d2258
--- /dev/null
+++ b/docs/en/serverless/ai-assistant/ai-assistant.mdx
@@ -0,0 +1,251 @@
+---
+id: serverlessObservabilityAiAssistant
+slug: /serverless/observability/ai-assistant
+title: AI Assistant
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'overview' ]
+---
+
+
+
+
+The AI Assistant uses generative AI to provide:
+
+* **Chat**: Have conversations with the AI Assistant. Chat uses function calling to request, analyze, and visualize your data.
+* **Contextual insights**: Open prompts throughout ((observability)) that explain errors and messages and suggest remediation.
+
+![Observability AI assistant preview](../images/ai-assistant-overview.gif)
+
+The AI Assistant integrates with your large language model (LLM) provider through our supported Elastic connectors:
+
+* [OpenAI connector](((kibana-ref))/openai-action-type.html) for OpenAI or Azure OpenAI Service.
+* [Amazon Bedrock connector](((kibana-ref))/bedrock-action-type.html) for Amazon Bedrock, specifically for the Claude models.
+
+
+The AI Assistant is powered by an integration with your large language model (LLM) provider.
+LLMs are known to sometimes present incorrect information as if it's correct.
+Elastic supports configuration and connection to the LLM provider and your knowledge base,
+but is not responsible for the LLM's responses.
+
+
+
+Also, the data you provide to the Observability AI assistant is _not_ anonymized, and is stored and processed by the third-party AI provider. This includes any data used in conversations for analysis or context, such as alert or event data, detection rule configurations, and queries. Therefore, be careful about sharing any confidential or sensitive details while using this feature.
+
+
+## Requirements
+
+The AI assistant requires the following:
+
+* An account with a third-party generative AI provider that supports function calling. The Observability AI Assistant supports the following providers:
+ * OpenAI `gpt-4`+.
+ * Azure OpenAI Service `gpt-4`(0613) or `gpt-4-32k`(0613) with API version `2023-07-01-preview` or more recent.
+ * AWS Bedrock, specifically the Anthropic Claude models.
+* The knowledge base requires a 4 GB ((ml)) node.
+
+## Your data and the AI Assistant
+
+Elastic does not use customer data for model training. This includes anything you send the model, such as alert or event data, detection rule configurations, queries, and prompts. However, any data you provide to the AI Assistant will be processed by the third-party provider you chose when setting up the OpenAI connector as part of the assistant setup.
+
+Elastic does not control third-party tools, and assumes no responsibility or liability for their content, operation, or use, nor for any loss or damage that may arise from your using such tools. Please exercise caution when using AI tools with personal, sensitive, or confidential information. Any data you submit may be used by the provider for AI training or other purposes. There is no guarantee that the provider will keep any information you provide secure or confidential. You should familiarize yourself with the privacy practices and terms of use of any generative AI tools prior to use.
+
+## Set up the AI Assistant
+
+To set up the AI Assistant:
+
+1. Create an authentication key with your AI provider to authenticate requests from the AI Assistant. You'll use this in the next step. Refer to your provider's documentation for information about creating authentication keys:
+ * [OpenAI API keys](https://platform.openai.com/docs/api-reference)
+ * [Azure OpenAI Service API keys](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference)
+ * [Amazon Bedrock authentication keys and secrets](https://docs.aws.amazon.com/bedrock/latest/userguide/security-iam.html)
+1. From **Project settings** → **Management** → **Connectors**, create an [OpenAI](((kibana-ref))/openai-action-type.html) or [Amazon Bedrock](((kibana-ref))/bedrock-action-type.html) connector.
+1. Authenticate communication between ((observability)) and the AI provider by providing the following information:
+ 1. In the **URL** field, enter the AI provider's API endpoint URL.
+ 1. Under **Authentication**, enter the API key or access key/secret you created in the previous step.
+
+## Add data to the AI Assistant knowledge base
+
+
+**If you started using the AI Assistant in technical preview**,
+any knowledge base articles you created using ELSER v1 will need to be reindexed or upgraded before they can be used.
+Going forward, you must create knowledge base articles using ELSER v2.
+You can either:
+
+* Clear all old knowledge base articles manually and reindex them.
+* Upgrade all knowledge base articles indexed with ELSER v1 to ELSER v2 using a [Python script](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/model-upgrades/upgrading-index-to-use-elser.ipynb).
+
+
+The AI Assistant uses [ELSER](((ml-docs))/ml-nlp-elser.html), Elastic's semantic search engine, to recall data from its internal knowledge base index to create retrieval augmented generation (RAG) responses. Adding data such as Runbooks, GitHub issues, internal documentation, and Slack messages to the knowledge base gives the AI Assistant context to provide more specific assistance.
+
+
+Your AI provider may collect telemetry when using the AI Assistant. Contact your AI provider for information on how data is collected.
+
+
+You can add information to the knowledge base by asking the AI Assistant to remember something while chatting (for example, "remember this for next time"). The assistant will create a summary of the information and add it to the knowledge base.
+
+You can also add external data to the knowledge base either in the Project Settings UI or using the ((es)) Index API.
+
+### Use the UI
+
+To add external data to the knowledge base in the Project Settings UI:
+
+1. Go to **Project Settings**.
+1. In the _Other_ section, click **AI assistant for Observability settings**.
+1. Select **Elastic AI Assistant for Observability**.
+1. Switch to the **Knowledge base** tab.
+1. Click the **New entry** button, and choose either:
+
+ * **Single entry**: Write content for a single entry in the UI.
+ * **Bulk import**: Upload a newline delimited JSON (`ndjson`) file containing a list of entries to add to the knowledge base.
+ Each object should conform to the following format:
+
+ ```json
+ {
+ "id": "a_unique_human_readable_id",
+ "text": "Contents of item",
+ }
+ ```
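+
+ For example, a minimal bulk import file contains one JSON object per line. The IDs and text below are hypothetical:
+
+ ```json
+ { "id": "runbook_payment_latency", "text": "If checkout latency exceeds 2s, restart the payment service and check its downstream dependencies." }
+ { "id": "oncall_escalation_policy", "text": "Escalate unresolved critical alerts to the SRE on-call channel after 30 minutes." }
+ ```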
+
+### Use the ((es)) Index API
+
+1. Ingest external data (GitHub issues, Markdown files, Jira tickets, text files, etc.) into ((es)) using the ((es)) [Index API](((ref))/docs-index_.html).
+1. Reindex your data into the AI Assistant's knowledge base index by completing the following query in **Developer Tools** → **Console**. Update the following fields before reindexing:
+ * `InternalDocsIndex`: Name of the index where your internal documents are stored.
+ * `text_field`: Name of the field containing your internal documents' text.
+ * `timestamp`: Name of the timestamp field in your internal documents.
+ * `public`: If `true`, the document is available to all users with access to your Observability project. If `false`, the document is restricted to the user indicated in the following `user.name` field.
+ * `user.name` (optional): If defined, restricts the internal document's availability to a specific user.
+ * You can add a query filter to index specific documents.
+
+```console
+POST _reindex
+{
+  "source": {
+    "index": "<InternalDocsIndex>",
+    "_source": [
+      "<text_field>",
+      "<timestamp>",
+      "namespace",
+      "is_correction",
+      "public",
+      "confidence"
+    ]
+  },
+  "dest": {
+    "index": ".kibana-observability-ai-assistant-kb-000001",
+    "pipeline": ".kibana-observability-ai-assistant-kb-ingest-pipeline"
+  },
+  "script": {
+    "inline": "ctx._source.text = ctx._source.remove(\"<text_field>\");ctx._source.namespace=\"<space>\";ctx._source.is_correction=false;ctx._source.public=<public>;ctx._source.confidence=\"high\";ctx._source['@timestamp'] = ctx._source.remove(\"<timestamp>\");ctx._source['user.name'] = \"<user.name>\""
+  }
+}
+```
+
+## Interact with the AI Assistant
+
+You can chat with the AI Assistant or interact with contextual insights located throughout ((observability)).
+See the following sections for more on interacting with the AI Assistant.
+
+
+After every answer the LLM provides, let us know if the answer was helpful.
+Your feedback helps us improve the AI Assistant!
+
+
+### AI Assistant chat
+
+Click **AI Assistant** in the upper-right corner where available to start the chat:
+
+![Observability AI assistant preview](../images/ai-assistant-button.png)
+
+This opens the AI Assistant flyout, where you can ask the assistant questions about your instance:
+
+![Observability AI assistant chat](../images/ai-assistant-chat.png)
+
+### AI Assistant functions
+
+
+
+The AI Assistant uses several functions to include relevant context in the chat conversation through text, data, and visual components. Both you and the AI Assistant can suggest functions. You can also edit the AI Assistant's function suggestions and inspect function responses. For example, you could use the `kibana` function to call a ((kib)) API on your behalf.
+
+You can suggest the following functions:
+
+* `alerts`: Get alerts for ((observability)).
+* `elasticsearch`: Call ((es)) APIs on your behalf.
+* `kibana`: Call ((kib)) APIs on your behalf.
+* `summarize`: Summarize parts of the conversation.
+* `visualize_query`: Visualize charts for ES|QL queries.
+
+Additional functions are available when your cluster has APM data:
+
+* `get_apm_correlations`: Get field values that are more prominent in the foreground set than the background set. This can be useful in determining which attributes (such as `error.message`, `service.node.name`, or `transaction.name`) are contributing to, for instance, a higher latency. Another option is a time-based comparison, where you compare before and after a change point.
+* `get_apm_downstream_dependencies`: Get the downstream dependencies (services or uninstrumented backends) for a service. Map the downstream dependency name to a service by returning both `span.destination.service.resource` and `service.name`. Use this to drill down further if needed.
+* `get_apm_error_document`: Get a sample error document based on the grouping name. This also includes the stacktrace of the error, which might hint at the cause.
+* `get_apm_service_summary`: Get a summary of a single service, including the language, service version, deployments, the environments, and the infrastructure that it is running in. For example, the number of pods and a list of their downstream dependencies. It also returns active alerts and anomalies.
+* `get_apm_services_list`: Get the list of monitored services, their health statuses, and alerts.
+* `get_apm_timeseries`: Display different APM metrics (such as throughput, failure rate, or latency) for any service or all services and any or all of their dependencies. Displayed both as a time series and as a single statistic. Additionally, the function returns any changes, such as spikes, step and trend changes, or dips. You can also use it to compare data by requesting two different time ranges, or, for example, two different service versions.
+
+### AI Assistant contextual prompts
+
+AI Assistant contextual prompts throughout ((observability)) provide the following information:
+
+- **Alerts**: Provides possible causes and remediation suggestions for log rate changes.
+- **Application performance monitoring (APM)**: Explains APM errors and provides remediation suggestions.
+- **Logs**: Explains log messages and generates search patterns to find similar issues.
+
+{/* Not included in initial serverless launch */}
+{/* - **Universal Profiling**: explains the most expensive libraries and functions in your fleet and provides optimization suggestions. */}
+{/* - **Infrastructure Observability**: explains the processes running on a host. */}
+
+For example, in the log details, you'll see prompts for **What's this message?** and **How do I find similar log messages?**:
+
+![Observability AI assistant example prompts for logs](../images/ai-assistant-logs-prompts.png)
+
+Clicking a prompt generates a message specific to that log entry.
+You can continue a conversation from a contextual prompt by clicking **Start chat** to open the AI Assistant chat.
+
+![Observability AI assistant example](../images/ai-assistant-logs.png)
+
+## Known issues
+
+### Token limits
+
+Most LLMs have a set number of tokens they can manage in a single conversation.
+When you reach the token limit, the LLM will throw an error, and Elastic will display a "Token limit reached" error.
+The exact number of tokens that the LLM can support depends on the LLM provider and model you're using.
+
+
diff --git a/docs/en/serverless/aiops/aiops-analyze-spikes.mdx b/docs/en/serverless/aiops/aiops-analyze-spikes.mdx
new file mode 100644
index 0000000000..2538813428
--- /dev/null
+++ b/docs/en/serverless/aiops/aiops-analyze-spikes.mdx
@@ -0,0 +1,76 @@
+---
+id: serverlessObservabilityAiopsAnalyzeSpikes
+slug: /serverless/observability/aiops-analyze-spikes
+title: Analyze log spikes and drops
+description: Find and investigate the causes of unusual spikes or drops in log rates.
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+
+
+{/* */}
+
+Elastic ((observability)) provides built-in log rate analysis capabilities,
+based on advanced statistical methods,
+to help you find and investigate the causes of unusual spikes or drops in log rates.
+
+To analyze log spikes and drops:
+
+1. In your ((observability)) project, go to **AIOps** → **Log rate analysis**.
+1. Choose a data view or saved search to access the log data you want to analyze.
+1. In the histogram chart, click a spike (or drop) to start the analysis.
+
+ ![Histogram showing log spikes and drops ](../images/log-rate-histogram.png)
+
+ When the analysis runs, it identifies statistically significant field-value combinations that contribute to the spike or drop,
+ and then displays them in a table:
+
+ ![Log rate analysis results table](../images/log-rate-analysis-results.png)
+
+ Notice that you can optionally turn on **Smart grouping** to summarize the results into groups.
+ You can also click **Filter fields** to remove fields that are not relevant.
+
+ The table shows an indicator of the level of impact and a sparkline showing the shape of the impact in the chart.
+1. Select a row to display the impact of the field on the histogram chart.
+1. From the **Actions** menu in the table, you can choose to view the field in **Discover**,
+view it in **Log Pattern Analysis**,
+or copy the table row information to the clipboard as a query filter.
+
+To pin a table row, click the row, then move the cursor to the histogram chart.
+It displays a tooltip with exact count values for the pinned field, which enables closer investigation.
+
+Brushes in the chart show the baseline time range and the deviation in the analyzed data.
+You can move the brushes to redefine both the baseline and the deviation and rerun the analysis with the modified values.
+
+
+
+
+## Log pattern analysis
+
+{/* */}
+
+Use log pattern analysis to find patterns in unstructured log messages and examine your data.
+When you run a log pattern analysis, it performs categorization analysis on a selected field,
+creates categories based on the data, and then displays them together in a chart.
+The chart shows the distribution of each category and an example document that matches the category.
+Log pattern analysis is useful when you want to examine how often different types of logs appear in your data set.
+It also helps you group logs in ways that go beyond what you can achieve with a terms aggregation.
+
+To run log pattern analysis:
+
+1. Follow the steps above to run a log rate analysis.
+1. From the **Actions** menu, choose **View in Log Pattern Analysis**.
+1. Select a category field and optionally apply any filters that you want.
+1. Click **Run pattern analysis**.
+
+ The results of the analysis are shown in a table:
+
+ ![Log pattern analysis of the message field ](../images/log-pattern-analysis.png)
+
+1. From the **Actions** menu, click the plus (or minus) icon to open **Discover** and show (or filter out) the given category there, which helps you to further examine your log messages.
+
+{/* TODO: Question: Is the log pattern analysis only available through the log rate analysis UI? */}
+
+{/* TODO: Add some good examples to this topic taken from existing docs or recommendations from reviewers. */}
diff --git a/docs/en/serverless/aiops/aiops-detect-anomalies.mdx b/docs/en/serverless/aiops/aiops-detect-anomalies.mdx
new file mode 100644
index 0000000000..4be610691e
--- /dev/null
+++ b/docs/en/serverless/aiops/aiops-detect-anomalies.mdx
@@ -0,0 +1,263 @@
+---
+id: serverlessObservabilityAiopsDetectAnomalies
+slug: /serverless/observability/aiops-detect-anomalies
+title: Detect anomalies
+description: Detect anomalies by comparing real-time and historical data from different sources to look for unusual, problematic patterns.
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+import Roles from '../partials/roles.mdx'
+
+
+
+The anomaly detection feature in Elastic ((observability)) automatically models the normal behavior of your time series data — learning trends,
+periodicity, and more — in real time to identify anomalies, streamline root cause analysis, and reduce false positives.
+
+To set up anomaly detection, you create and run anomaly detection jobs.
+Anomaly detection jobs use proprietary ((ml)) algorithms to detect anomalous events or patterns, such as:
+
+* Anomalies related to temporal deviations in values, counts, or frequencies
+* Anomalies related to unusual locations in geographic data
+* Statistical rarity
+* Unusual behaviors for a member of a population
+
+To learn more about anomaly detection algorithms, refer to the [((ml))](((ml-docs))/ml-ad-algorithms.html) documentation.
+Note that the ((ml)) documentation may contain details that are not valid when using a serverless project.
+
+
+
+A _datafeed_ retrieves time series data from ((es)) and provides it to an
+anomaly detection job for analysis.
+
+The job uses _buckets_ to divide the time series into batches for processing.
+For example, a job may use a bucket span of 1 hour.
+
+Each ((anomaly-job)) contains one or more _detectors_, which define the type of
+analysis that occurs (for example, `max`, `average`, or `rare` analytical
+functions) and the fields that are analyzed. Some of the analytical functions
+look for single anomalous data points. For example, `max` identifies the maximum
+value that is seen within a bucket. Others perform some aggregation over the
+length of the bucket. For example, `mean` calculates the mean of all the data
+points seen within the bucket.
+
+To learn more about anomaly detection, refer to the [((ml))](((ml-docs))/ml-ad-overview.html) documentation.
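+
+For illustration only, the concepts above roughly map onto a job definition like the following sketch, which uses the ((es)) ((ml)) API directly. The job ID and field name are hypothetical, and in a serverless project you typically create jobs through the wizards described below rather than through the API:
+
+```console
+PUT _ml/anomaly_detectors/hypothetical-cpu-job
+{
+  "analysis_config": {
+    "bucket_span": "1h",
+    "detectors": [
+      { "function": "mean", "field_name": "system.cpu.total.norm.pct" }
+    ]
+  },
+  "data_description": {
+    "time_field": "@timestamp"
+  }
+}
+```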
+
+
+
+
+
+# Create and run an anomaly detection job
+
+1. In your ((observability)) project, go to **AIOps** → **Anomaly detection**.
+1. Click **Create anomaly detection job** (or **Create job** if other jobs exist).
+1. Choose a data view or saved search to access the data you want to analyze.
+1. Select the wizard for the type of job you want to create.
+The following wizards are available.
+You might also see specialized wizards based on the type of data you are analyzing.
+
+In general, it is a good idea to start with single metric anomaly detection jobs for your key performance indicators.
+After you examine these simple analysis results, you will have a better idea of what the influencers might be.
+Then you can create multi-metric jobs and split the data or create more complex analysis functions as necessary.
+
+
+* **Single metric**: Creates simple jobs that have a single detector. A _detector_ applies an analytical function to specific fields in your data. In addition to limiting the number of detectors, the single metric wizard omits many of the more advanced configuration options.
+* **Multi-metric**: Creates jobs that can have more than one detector, which is more efficient than running multiple jobs against the same data.
+* **Population**: Creates jobs that detect activity that is unusual compared to the behavior of the population.
+* **Advanced**: Creates jobs that can have multiple detectors and enables you to configure all job settings.
+* **Categorization**: Creates jobs that group log messages into categories and use `count` or `rare` functions to detect anomalies within them.
+* **Rare**: Creates jobs that detect rare occurrences in time series data. Rare jobs use the `rare` or `freq_rare` functions and also detect rare occurrences in populations.
+* **Geo**: Creates jobs that detect unusual occurrences in the geographic locations of your data. Your data set must contain geo data.
+
+ For more information about job types, refer to the [((ml))](((ml-docs))/ml-anomaly-detection-job-types.html) documentation.
+
+
+
+ Before selecting a wizard, click **Data Visualizer** to explore the fields and metrics in your data.
+ To get the best results, you must understand your data, including its data types and the range and distribution of values.
+
+ In the **Data Visualizer**, use the time filter to select a time period that you’re interested in exploring,
+ or click **Use full data** to view the full time range of data.
+ Expand the fields to see details about the range and distribution of values.
+ When you're done, go back to the first step and create your job.
+
+5. Step through the instructions in the job creation wizard to configure your job.
+You can accept the default settings now and adjust them later.
+1. If you want the job to start immediately when the job is created, make sure that option is selected on the summary page.
+1. When you're done, click **Create job**.
+When the job runs, the ((ml)) features analyze the input stream of data, model its behavior, and perform analysis based on the detectors in each job.
+When an event occurs outside of the baselines of normal behavior, that event is identified as an anomaly.
+1. After the job is started, click **View results**.
+
+# View the results
+
+After the anomaly detection job has processed some data,
+you can view the results in Elastic ((observability)).
+
+
+Depending on the capacity of your machine,
+you might need to wait a few seconds for the analysis to generate initial results.
+
+
+If you clicked **View results** after creating the job, the results open in either the **Single Metric Viewer** or **Anomaly Explorer**.
+To switch between these tools, click the icons in the upper-left corner of each tool.
+
+Read the following sections to learn more about these tools:
+
+*
+*
+
+
+
+## View single metric job results
+
+The **Single Metric Viewer** contains a chart that represents the actual and expected values over time:
+
+![Single Metric Viewer showing analysis ](../images/anomaly-detection-single-metric-viewer.png)
+
+* The line in the chart represents the actual data values.
+* The shaded area represents the bounds for the expected values.
+* The area between the upper and lower bounds represents the most likely values for the model, using a 95% confidence level.
+That is to say, there is a 95% chance of the actual value falling within these bounds.
+If a value is outside of this area, it will usually be identified as anomalous.
+
+
+ Expected values are available only if **Enable model plot** was selected under Job Details
+ when you created the job.
+
+
+To explore your data:
+
+1. If the **Single Metric Viewer** is not already open, go to **AIOps** → **Anomaly detection** and click the Single Metric Viewer icon next to the job you created.
+Note that the Single Metric Viewer icon will be grayed out for advanced or multi-metric jobs.
+1. In the time filter, specify a time range that covers the majority of the analyzed data points.
+1. Notice that the model improves as it processes more data.
+At the beginning, the expected range of values is pretty broad, and the model is not capturing the periodicity in the data.
+But it quickly learns and begins to reflect the patterns in your data.
+The duration of the learning process heavily depends on the characteristics and complexity of the input data.
+1. Look for anomaly data points, depicted by colored dots or cross symbols, and hover over a data point to see more details about the anomaly.
+Note that anomalies with medium or high multi-bucket impact are depicted with a cross symbol instead of a dot.
+
+ Any data points outside the range that was predicted by the model are marked
+ as anomalies. In order to provide a sensible view of the results, an
+ _anomaly score_ is calculated for each bucket time interval. The anomaly score
+ is a value from 0 to 100, which indicates the significance of the anomaly
+ compared to previously seen anomalies. The highly anomalous values are shown in
+ red and the low scored values are shown in blue. An interval with a high
+ anomaly score is significant and requires investigation.
+ For more information about anomaly scores, refer to the [((ml))](((ml-docs))/ml-ad-explain.html) documentation.
+
+1. (Optional) Annotate your job results by drag-selecting a period of time and entering annotation text.
+Annotations are notes that refer to events in a specific time period.
+They can be created by the user or generated automatically by the anomaly detection job to reflect model changes and noteworthy occurrences.
+1. Under **Anomalies**, expand each anomaly to see key details, such as the time, the actual and expected ("typical") values, and their probability.
+The **Anomaly explanation** section gives you further insights about each anomaly, such as its type and impact, to make it easier to interpret the job results:
+
+ ![Single Metric Viewer showing anomaly details ](../images/anomaly-detection-details.png)
+
+ By default, the **Anomalies** table contains all anomalies that have a severity of "warning" or higher in the selected section of the timeline.
+ If you are only interested in critical anomalies, for example, you can change the severity threshold for this table.
+
+1. (Optional) From the **Actions** menu in the **Anomalies** table, you can choose to view relevant documents in **Discover** or create a job rule.
+Job rules instruct anomaly detectors to change their behavior based on domain-specific knowledge that you provide.
+To learn more, refer to
+
+After you have identified anomalies, often the next step is to try to determine
+the context of those situations. For example, are there other factors that are
+contributing to the problem? Are the anomalies confined to particular
+applications or servers? You can begin to troubleshoot these situations by
+layering additional jobs or creating multi-metric jobs.
+
+
+
+## View advanced or multi-metric job results
+
+Conceptually, you can think of _multi-metric anomaly detection jobs_ as running multiple independent single metric jobs.
+By bundling them together in a multi-metric job, however,
+you can see an overall score and shared influencers for all the metrics and all the entities in the job.
+Multi-metric jobs therefore scale better than having many independent single metric jobs.
+They also provide better results when you have influencers that are shared across the detectors.
+
+
+When you create an anomaly detection job, you can identify fields as _influencers_.
+These are fields that you think contain information about someone or something that influences or contributes to anomalies.
+As a best practice, do not pick too many influencers.
+For example, you generally do not need more than three.
+If you pick many influencers, the results can be overwhelming, and there is some overhead to the analysis.
+
+To learn more about influencers, refer to the [((ml))](((ml-docs))/ml-ad-run-jobs.html#ml-ad-influencers) documentation.
+
+
+
+You can also configure your anomaly detection jobs to split a single time series into multiple time series based on a categorical field.
+For example, you could create a job for analyzing response code rates that has a single detector that splits the data based on the `response.keyword` field,
+and uses the `count` function to determine when the number of events is anomalous.
+You might use a job like this if you want to look at both high and low request rates partitioned by response code.
+
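+The wizard builds this configuration for you, but for reference, a minimal sketch of the `analysis_config` behind such a job might look like the following (the bucket span and influencer choice are illustrative assumptions, not values prescribed by this guide):
+
+```json
+{
+  "analysis_config": {
+    "bucket_span": "15m",
+    "detectors": [
+      {
+        "detector_description": "Event count partitioned by response code",
+        "function": "count",
+        "partition_field_name": "response.keyword"
+      }
+    ],
+    "influencers": [ "response.keyword" ]
+  }
+}
+```
+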
+To view advanced or multi-metric results in the
+**Anomaly Explorer**:
+
+1. If the **Anomaly Explorer** is not already open, go to **AIOps** → **Anomaly detection** and click the Anomaly Explorer icon next to the job you created.
+1. In the time filter, specify a time range that covers the majority of the analyzed data points.
+1. If you specified influencers during job creation, the view includes a list of the top influencers for all of the detected anomalies in that same time period.
+The list includes maximum anomaly scores, which in this case are aggregated for each influencer, for each bucket, across all detectors.
+There is also a total sum of the anomaly scores for each influencer.
+Use this list to help you narrow down the contributing factors and focus on the most anomalous entities.
+1. Under **Anomaly timeline**, click a section in the swim lanes to obtain more information about the anomalies in that time period.
+ ![Anomaly Explorer showing swim lanes with anomaly selected ](../images/anomaly-explorer.png)
+ You can see exact times when anomalies occurred.
+ If there are multiple detectors or metrics in the job, you can see which caught the anomaly.
+ You can also switch to viewing this time series in the **Single Metric Viewer** by selecting **View series** in the **Actions** menu.
+1. Under **Anomalies** (in the **Anomaly Explorer**), expand an anomaly to see key details, such as the time,
+the actual and expected ("typical") values, and the influencers that contributed to the anomaly:
+
+ ![Anomaly Explorer showing anomaly details ](../images/anomaly-detection-multi-metric-details.png)
+
+ By default, the **Anomalies** table contains all anomalies that have a severity of "warning" or higher in the selected section of the timeline.
+ If you are only interested in critical anomalies, for example, you can change the severity threshold for this table.
+
+ If your job has multiple detectors, the table aggregates the anomalies to show the highest severity anomaly per detector and entity,
+ which is the field value that is displayed in the **found for** column.
+
+ To view all the anomalies without any aggregation, set the **Interval** to **Show all**.
+
+
+ The anomaly scores that you see in each section of the **Anomaly Explorer** might differ slightly.
+ This disparity occurs because for each job there are bucket results, influencer results, and record results.
+ Anomaly scores are generated for each type of result.
+ The anomaly timeline uses the bucket-level anomaly scores.
+ The list of top influencers uses the influencer-level anomaly scores.
+ The list of anomalies uses the record-level anomaly scores.
+
+
+## Next steps
+
+After setting up an anomaly detection job, you may want to:
+
+*
+*
+*
diff --git a/docs/en/serverless/aiops/aiops-detect-change-points.mdx b/docs/en/serverless/aiops/aiops-detect-change-points.mdx
new file mode 100644
index 0000000000..2f83f3c721
--- /dev/null
+++ b/docs/en/serverless/aiops/aiops-detect-change-points.mdx
@@ -0,0 +1,69 @@
+---
+id: serverlessObservabilityAiopsDetectChangePoints
+slug: /serverless/observability/aiops-detect-change-points
+title: Detect change points
+description: Detect distribution changes, trend changes, and other statistically significant change points in a metric of your time series data.
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+{/* */}
+
+The change point detection feature in Elastic ((observability)) detects distribution changes,
+trend changes, and other statistically significant change points in time series data.
+Unlike anomaly detection, change point detection does not require you to configure a job or generate a model.
+Instead you select a metric and immediately see a visual representation that splits the time series into two parts, before and after the change point.
+
+Elastic ((observability)) uses a [change point aggregation](((ref))/search-aggregations-change-point-aggregation.html)
+to detect change points. This aggregation can detect change points when:
+
+* a significant dip or spike occurs
+* the overall distribution of values has changed significantly
+* there was a statistically significant step up or down in value distribution
+* an overall trend change occurs
+
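+This aggregation is also available directly in ((es)). The following is a rough sketch of such a request (the index name, metric field, and interval are assumptions for illustration only):
+
+```sh
+GET my-metrics-index/_search
+{
+  "size": 0,
+  "aggs": {
+    "over_time": {
+      "date_histogram": {
+        "field": "@timestamp",
+        "fixed_interval": "1d"
+      },
+      "aggs": {
+        "avg_cpu": { "avg": { "field": "system.cpu.total.norm.pct" } }
+      }
+    },
+    "cpu_change_point": {
+      "change_point": {
+        "buckets_path": "over_time>avg_cpu"
+      }
+    }
+  }
+}
+```
+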
+To detect change points:
+
+1. In your ((observability)) project, go to **AIOps** → **Change point detection**.
+1. Choose a data view or saved search to access the data you want to analyze.
+1. Select a function: **avg**, **max**, **min**, or **sum**.
+1. In the time filter, specify a time range over which you want to detect change points.
+1. From the **Metric field** list, select a field you want to check for change points.
+1. (Optional) From the **Split field** list, select a field to split the data by.
+If the cardinality of the split field exceeds 10,000, only the first 10,000 values, sorted by document count, are analyzed.
+Use this option when you want to investigate the change point across multiple instances, pods, clusters, and so on.
+For example, you may want to view CPU utilization split across multiple instances without having to jump across multiple dashboards and visualizations.
+
+
+ You can configure a maximum of six combinations of a function applied to a metric field, partitioned by a split field, to identify change points.
+
+
+The change point detection feature automatically dissects the time series into multiple points within the given time window,
+tests whether the behavior is statistically different before and after each point in time, and then detects a change point if one exists:
+
+ ![Change point detection UI showing change points split by process ](../images/change-point-detection.png)
+
+The resulting view includes:
+
+* The timestamp of the change point
+* A preview chart
+* The type of change point and its p-value. The p-value indicates the magnitude of the change; lower values indicate more significant changes.
+* The name and value of the split field, if used.
+
+If the analysis is split by a field, a separate chart is shown for every partition that has a detected change point.
+The chart displays the type of change point, its value, and the timestamp of the bucket where the change point has been detected.
+
+On the **Change point detection** page, you can also:
+
+* Select a subset of charts and click **View selected** to view only the selected charts.
+
+ ![View selected change point detection charts ](../images/change-point-detection-view-selected.png)
+
+* Filter the results by specific types of change points by using the change point type selector:
+
+ ![Change point detection filter by type list](../images/change-point-detection-filter-by-type.png)
+
+* Attach change points to a chart or dashboard by using the context menu:
+
+ ![Change point detection add to charts menu](../images/change-point-detection-attach-charts.png)
diff --git a/docs/en/serverless/aiops/aiops-forecast-anomaly.mdx b/docs/en/serverless/aiops/aiops-forecast-anomaly.mdx
new file mode 100644
index 0000000000..a56315e11a
--- /dev/null
+++ b/docs/en/serverless/aiops/aiops-forecast-anomaly.mdx
@@ -0,0 +1,46 @@
+---
+id: serverlessObservabilityAiopsDetectAnomaliesForecast
+slug: /serverless/observability/aiops-forecast-anomalies
+title: Forecast future behavior
+description: Predict future behavior of your data by creating a forecast for an anomaly detection job.
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+import Roles from '../partials/roles.mdx'
+
+
+
+In addition to detecting anomalous behavior in your data,
+you can use the ((ml)) features to predict future behavior.
+
+You can use a forecast to estimate a time series value at a specific future date.
+For example, you might want to determine how much disk usage to expect
+next Sunday at 09:00.
+
+You can also use a forecast to estimate the probability of a time series value occurring at a future date.
+For example, you might want to determine how likely it is that your disk utilization will reach 100% before the end of next week.
+
+To create a forecast:
+
+1. and view the results in the **Single Metric Viewer**.
+1. Click **Forecast**.
+1. Specify a duration for your forecast.
+This value indicates how far to extrapolate beyond the last record that was processed.
+You must use time units, for example 1w, 1d, 1h, and so on.
+1. Click **Run**.
+1. View the forecast in the **Single Metric Viewer**:
+
+ ![Single Metric Viewer showing forecast ](../images/anomaly-detection-forecast.png)
+
+ * The line in the chart represents the predicted data values.
+ * The shaded area represents the bounds for the predicted values, which also gives an indication of the confidence of the predictions.
+ * Note that the bounds generally increase with time (that is to say, the confidence levels decrease),
+ since you are forecasting further into the future.
+ Eventually if the confidence levels are too low, the forecast stops.
+
+1. (Optional) After the job has processed more data, click the **Forecast** button again to compare the forecast to actual data.
+
+ The resulting chart will contain the actual data values, the bounds for the expected values, the anomalies, the forecast data values, and the bounds for the forecast.
+ This combination of actual and forecast data gives you an indication of how well the ((ml)) features can extrapolate the future behavior of the data.
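+
+The same capability is available through the ((ml)) forecast API. If you prefer to script forecast creation, a minimal sketch looks like this (the job ID is a placeholder, and the duration and expiry values are only examples):
+
+```sh
+POST _ml/anomaly_detectors/my-disk-usage-job/_forecast
+{
+  "duration": "7d",
+  "expires_in": "30d"
+}
+```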
diff --git a/docs/en/serverless/aiops/aiops-tune-anomaly-detection-job.mdx b/docs/en/serverless/aiops/aiops-tune-anomaly-detection-job.mdx
new file mode 100644
index 0000000000..9ea318b5d4
--- /dev/null
+++ b/docs/en/serverless/aiops/aiops-tune-anomaly-detection-job.mdx
@@ -0,0 +1,178 @@
+---
+id: serverlessObservabilityAiopsTuneAnomalyDetectionJob
+slug: /serverless/observability/aiops-tune-anomaly-detection-job
+title: Tune your anomaly detection job
+description: Tune your job by creating calendars, adding job rules, and defining custom URLs.
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+import Roles from '../partials/roles.mdx'
+
+
+
+After you run an anomaly detection job and view the results,
+you might find that you need to alter the job configuration or settings.
+
+To further tune your job, you can:
+
+* that contain a list of scheduled events for which you do not want to generate anomalies, such as planned system outages or public holidays.
+* that instruct anomaly detectors to change their behavior based on domain-specific knowledge that you provide.
+Your job rules can use filter lists, which contain values that you can use to include or exclude events from the ((ml)) analysis.
+* to make dashboards and other resources readily available when viewing job results.
+
+For more information about tuning your job,
+refer to the how-to guides in the [((ml))](((ml-docs))/anomaly-how-tos.html) documentation.
+Note that the ((ml)) documentation may contain details that are not valid when using a fully-managed Elastic project.
+
+
+ You can also create calendars and add URLs when configuring settings during job creation,
+ but generally it's easier to start with a simple job and add complexity later.
+
+
+
+
+## Create calendars
+
+Sometimes there are periods when you expect unusual activity to take place,
+such as bank holidays, "Black Friday", or planned system outages.
+If you identify these events in advance, no anomalies are generated during that period.
+The ((ml)) model is not adversely affected, and you do not receive spurious results.
+
+To create a calendar and add scheduled events:
+
+1. In your ((observability)) project, go to **AIOps** → **Anomaly detection**.
+1. Click **Settings**.
+1. Under **Calendars**, click **Create**.
+1. Enter an ID and description for the calendar.
+1. Select the jobs you want to apply the calendar to, or turn on **Apply calendar to all jobs**.
+1. Under **Events**, click **New event** or click **Import events** to import events from an iCalendar (ICS) file:
+
+ ![Create new calendar page](../images/anomaly-detection-create-calendar.png)
+
+ A scheduled event must have a start time, end time, and calendar ID.
+ In general, scheduled events are short in duration (typically lasting from a few hours to a day) and occur infrequently.
+ If you have regularly occurring events, such as weekly maintenance periods,
+ you do not need to create scheduled events for these circumstances;
+ they are already handled by the ((ml)) analytics.
+ If your ICS file contains recurring events, only the first occurrence is imported.
+
+1. When you're done adding events, save your calendar.
+
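+Calendars and their events can also be managed with the ((ml)) calendar APIs. The following is a minimal sketch (the calendar ID, job ID, and epoch-millisecond event times are placeholders for illustration):
+
+```sh
+PUT _ml/calendars/planned-outages
+{
+  "job_ids": [ "my-anomaly-job" ],
+  "description": "Planned system outages"
+}
+
+POST _ml/calendars/planned-outages/events
+{
+  "events": [
+    {
+      "description": "Network maintenance window",
+      "start_time": 1735689600000,
+      "end_time": 1735776000000
+    }
+  ]
+}
+```
+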
+You must identify scheduled events *before* your anomaly detection job analyzes the data for that time period.
+((ml-cap)) results are not updated retroactively.
+Bucket results are generated during scheduled events, but they have an anomaly score of zero.
+
+
+ If you use long or frequent scheduled events,
+ it might take longer for the ((ml)) analytics to learn to model your data,
+ and some anomalous behavior might be missed.
+
+
+
+
+## Create job rules and filters
+
+By default, anomaly detection is unsupervised,
+and the ((ml)) models have no awareness of the domain of your data.
+As a result, anomaly detection jobs might identify events that are statistically significant but are uninteresting when you know the larger context.
+
+You can customize anomaly detection by creating custom job rules.
+*Job rules* instruct anomaly detectors to change their behavior based on domain-specific knowledge that you provide.
+When you create a rule, you can specify conditions, scope, and actions.
+When the conditions of a rule are satisfied, its actions are triggered.
+
+
+If you have an anomaly detector that is analyzing CPU usage,
+you might decide you are only interested in anomalies where the CPU usage is greater than a certain threshold.
+You can define a rule with conditions and actions that instruct the detector to refrain from generating ((ml)) results when there are anomalous events related to low CPU usage.
+You might also decide to add a scope for the rule so that it applies only to certain machines.
+The scope is defined by using ((ml)) filters.
+
+
+*Filters* contain a list of values that you can use to include or exclude events from the ((ml)) analysis.
+You can use the same filter in multiple anomaly detection jobs.
+
+
+If you are analyzing web traffic, you might create a filter that contains a list of IP addresses.
+The list could contain IP addresses that you trust to upload data to your website or to send large amounts of data from behind your firewall.
+You can define the rule's scope so that the action triggers only when a specific field in your data matches (or doesn't match) a value in the filter.
+This gives you much greater control over which anomalous events affect the ((ml)) model and appear in the ((ml)) results.
+
+
+To create a job rule, first create any filter lists you want to use in the rule, then configure the rule:
+
+1. In your ((observability)) project, go to **AIOps** → **Anomaly detection**.
+1. (Optional) Create one or more filter lists:
+ 1. Click **Settings**.
+ 1. Under **Filter lists**, click **Create**.
+ 1. Enter the filter list ID. This is the ID you will select when you want to use the filter list in a job rule.
+ 1. Click **Add item** and enter one item per line.
+ 1. Click **Add** then save the filter list:
+
+ ![Create filter list](../images/anomaly-detection-create-filter-list.png)
+
+1. Open the job results in the **Single Metric Viewer** or **Anomaly Explorer**.
+1. From the **Actions** menu in the **Anomalies** table, select **Configure job rules**.
+
+ ![Configure job rules menu selection](../images/anomaly-detection-configure-job-rules.png)
+
+1. Choose which actions to take when the job rule matches the anomaly: **Skip result**, **Skip model update**, or both.
+1. Under **Conditions**, add one or more conditions that must be met for the action to be triggered.
+1. Under **Scope** (if available), add one or more filter lists to limit where the job rule applies.
+1. Save the job rule.
+Note that changes to job rules take effect for new results only.
+To apply these changes to existing results, you must clone and rerun the job.
+
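+Behind the UI, filter lists and job rules have a compact JSON form. As a rough sketch (the filter ID, field name, and threshold value are illustrative assumptions), a filter list could be created like this:
+
+```sh
+PUT _ml/filters/trusted-clients
+{
+  "description": "Trusted client IP addresses",
+  "items": [ "192.0.2.10", "192.0.2.11" ]
+}
+```
+
+A rule that skips results for those clients when the actual value is low is then stored as a `custom_rules` fragment on the detector, for example:
+
+```json
+"custom_rules": [
+  {
+    "actions": [ "skip_result" ],
+    "scope": {
+      "client.ip": { "filter_id": "trusted-clients", "filter_type": "include" }
+    },
+    "conditions": [
+      { "applies_to": "actual", "operator": "lt", "value": 0.2 }
+    ]
+  }
+]
+```
+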
+
+
+## Define custom URLs
+
+You can optionally attach one or more custom URLs to your anomaly detection jobs.
+Links for these URLs will appear in the **Actions** menu of the anomalies table when viewing job results in the **Single Metric Viewer** or **Anomaly Explorer**.
+Custom URLs can point to dashboards, the Discover app, or external websites.
+For example, you can define a custom URL that enables users to drill down to the source data from the results set.
+
+To add a custom URL to the **Actions** menu:
+
+1. In your ((observability)) project, go to **AIOps** → **Anomaly detection**.
+1. From the **Actions** menu in the job list, select **Edit job**.
+1. Select the **Custom URLs** tab, then click **Add custom URL**.
+1. Enter the label to use for the link text.
+1. Choose the type of resource you want to link to:
+
+
+ ((kib)) dashboard
+ Select the dashboard you want to link to.
+
+
+ Discover
+ Select the data view to use.
+
+
+ Other
+ Specify the URL for the external website.
+
+
+1. Click **Test** to test your link.
+1. Click **Add**, then save your changes.
+
+Now when you view job results in **Single Metric Viewer** or **Anomaly Explorer**,
+the **Actions** menu includes the custom link:
+
+ ![Custom URL link in the Actions menu of the Anomalies table](../images/anomaly-detection-custom-url.png)
+
+
+It is also possible to use string substitution in custom URLs.
+For example, you might have a **Raw data** URL defined as:
+
+`discover#/?_g=(time:(from:'$earliest$',mode:absolute,to:'$latest$'))&_a=(index:ff959d40-b880-11e8-a6d9-e546fe2bba5f,query:(language:kuery,query:'customer_full_name.keyword:"$customer_full_name.keyword$"'))`.
+
+The value of the `customer_full_name.keyword` field is passed to the target page when the link is clicked.
+
+For more information about using string substitution,
+refer to the [((ml))](((ml-docs))/ml-configuring-url.html#ml-configuring-url-strings) documentation.
+Note that the ((ml)) documentation may contain details that are not valid when using a fully-managed Elastic project.
+
+
diff --git a/docs/en/serverless/aiops/aiops.mdx b/docs/en/serverless/aiops/aiops.mdx
new file mode 100644
index 0000000000..e0a6934eb3
--- /dev/null
+++ b/docs/en/serverless/aiops/aiops.mdx
@@ -0,0 +1,28 @@
+---
+id: serverlessObservabilityAiops
+slug: /serverless/observability/aiops
+title: AIOps
+description: Automate anomaly detection and accelerate root cause analysis with AIOps.
+tags: [ 'serverless', 'observability', 'overview' ]
+---
+
+
+
+The AIOps capabilities available in Elastic ((observability)) enable you to consume and process large observability data sets at scale, reducing the time and effort required to detect, understand, investigate, and resolve incidents.
+Built on predictive analytics and ((ml)), our AIOps capabilities require no prior experience with ((ml)).
+DevOps engineers, SREs, and security analysts can get started right away using these AIOps features with little or no advanced configuration:
+
+
+
+
+ Detect anomalies by comparing real-time and historical data from different sources to look for unusual, problematic patterns.
+
+
+
+ Find and investigate the causes of unusual spikes or drops in log rates.
+
+
+
+ Detect distribution changes, trend changes, and other statistically significant change points in a metric of your time series data.
+
+
diff --git a/docs/en/serverless/alerting/aggregation-options.mdx b/docs/en/serverless/alerting/aggregation-options.mdx
new file mode 100644
index 0000000000..ad12cda703
--- /dev/null
+++ b/docs/en/serverless/alerting/aggregation-options.mdx
@@ -0,0 +1,49 @@
+---
+id: serverlessObservabilityAggregationOptions
+slug: /serverless/observability/aggregationOptions
+title: Aggregation options
+description: Learn about aggregations available in alerting rules.
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+Aggregations summarize your data to make it easier to analyze.
+In some alerting rules, you can specify aggregations to gather data for the rule.
+
+The following aggregations are available in some rules:
+
+
+
+ Average
+ Average value of a numeric field.
+
+
+ Cardinality
+ Approximate number of unique values in a field.
+
+
+ Document count
+ Number of documents in the selected dataset.
+
+
+ Max
+ Highest value of a numeric field.
+
+
+ Min
+ Lowest value of a numeric field.
+
+
+ Percentile
+ Numeric value which represents the point at which n% of all values in the selected dataset are lower (choices are 95th or 99th).
+
+
+ Rate
+ Rate at which a specific field changes over time. To learn about how the rate is calculated, refer to .
+
+
+ Sum
+ Total of a numeric field in the selected dataset.
+
+
diff --git a/docs/en/serverless/alerting/aiops-generate-anomaly-alerts.mdx b/docs/en/serverless/alerting/aiops-generate-anomaly-alerts.mdx
new file mode 100644
index 0000000000..8de12f03e5
--- /dev/null
+++ b/docs/en/serverless/alerting/aiops-generate-anomaly-alerts.mdx
@@ -0,0 +1,217 @@
+---
+id: serverlessObservabilityGenerateAnomalyAlerts
+slug: /serverless/observability/aiops-generate-anomaly-alerts
+title: Create an anomaly detection rule
+description: Get alerts when anomalies match specific conditions.
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+import Connectors from './alerting-connectors.mdx'
+
+import Roles from '../partials/roles.mdx'
+
+
+
+import FeatureBeta from '../partials/feature-beta.mdx'
+
+
+
+Create an anomaly detection rule to check for anomalies in one or more anomaly detection jobs.
+If the conditions of the rule are met, an alert is created, and any actions specified in the rule are triggered.
+For example, you can create a rule to check every fifteen minutes for critical anomalies and then alert you by email when they are detected.
+
+To create an anomaly detection rule:
+
+1. In your ((observability)) project, go to **AIOps** → **Anomaly detection**.
+1. In the list of anomaly detection jobs, find the job you want to check for anomalies.
+Haven't created a job yet? .
+1. From the **Actions** menu next to the job, select **Create alert rule**.
+1. Specify a name and optional tags for the rule. You can use these tags later to filter alerts.
+1. Verify that the correct job is selected and configure the alert details:
+
+ ![Anomaly detection alert settings ](../images/anomaly-detection-alert.png)
+
+1. For the result type:
+
+
+
+ **Bucket**
+ How unusual the anomaly was within the bucket of time
+
+
+ **Record**
+ What individual anomalies are present in a time range
+
+
+ **Influencer**
+ The most unusual entities in a time range
+
+
+
+1. Adjust the **Severity** to match the anomaly score that will trigger the action.
+The anomaly score indicates the significance of a given anomaly compared to previous anomalies.
+The default severity threshold is 75, which means every anomaly with an anomaly score of 75 or higher will trigger the associated action.
+
+1. (Optional) Turn on **Include interim results** to include results that are created by the anomaly detection job _before_ a bucket is finalized. These results might disappear after the bucket is fully processed.
+Include interim results if you want to be notified earlier about a potential anomaly even if it might be a false positive.
+
+1. (Optional) Expand and change **Advanced settings**:
+
+
+
+ **Lookback interval**
+ The interval used to query previous anomalies during each condition check. Setting the lookback interval lower than the default value might result in missed anomalies.
+
+
+ **Number of latest buckets**
+ The number of buckets to check to obtain the highest anomaly from all the anomalies that are found during the Lookback interval. An alert is created based on the anomaly with the highest anomaly score from the most anomalous bucket.
+
+
+1. (Optional) Under **Check the rule condition with an interval**, specify an interval, then click **Test** to check the rule condition with the interval specified.
+The button is grayed out if the datafeed is not started.
+To test the rule, start the datafeed.
+1. (Optional) If you want to change how often the condition is evaluated, adjust the **Check every** setting.
+1. (Optional) Set up **Actions**.
+1. **Save** your rule.
+
+
+ Anomaly detection rules are defined as part of a job.
+ Alerts generated by these rules do not appear on the **Alerts** page.
+
+
+## Add actions
+
+You can extend your rules with actions that interact with third-party systems, write to logs or indices, or send user notifications. You can add an action to a rule at any time. You can create rules without adding actions, and you can also define multiple actions for a single rule.
+
+To add actions to rules, you must first create a connector for that service (for example, an email or external incident management system), which you can then use for different rules, each with their own action frequency.
+
+
+Connectors provide a central place to store connection information for services and integrations with third party systems.
+The following connectors are available when defining actions for alerting rules:
+
+
+
+For more information on creating connectors, refer to Connectors.
+
+
+
+
+After you select a connector, you must set the action frequency. You can choose to create a **Summary of alerts** on each check interval or on a custom interval. For example, you can send email notifications that summarize the new, ongoing, and recovered alerts every twelve hours.
+
+Alternatively, you can set the action frequency to **For each alert** and specify the conditions each alert must meet for the action to run. For example, you can send an email only when the alert status changes to critical.
+
+![Configure when a rule is triggered](../images/alert-action-frequency.png)
+
+With the **Run when** menu you can choose if an action runs when the anomaly score matches the condition or when the alert recovers. For example, you can add a corresponding action for each state to ensure you are alerted when the anomaly score matches the condition and also when the alert recovers.
+![Choose between anomaly score matched condition or recovered](../images/alert-anomaly-action-frequency-recovered.png)
+
+
+
+
+Use the default notification message or customize it.
+You can add more context to the message by clicking the Add variable icon and selecting from a list of available variables.
+
+![Action variables list](../images/action-variables-popup.png)
+
+The following variables are specific to this rule type.
+You can also specify [variables common to all rules](((kibana-ref))/rule-action-variables.html).
+
+
+ `context.anomalyExplorerUrl`
+
+ URL to open in the Anomaly Explorer.
+
+ `context.isInterim`
+
+ Indicates whether top hits contain interim results.
+
+ `context.jobIds`
+
+ List of job IDs that triggered the alert.
+
+ `context.message`
+
+ Alert info message.
+
+ `context.score`
+
+ Anomaly score at the time of the notification action.
+
+ `context.timestamp`
+
+ The bucket timestamp of the anomaly.
+
+ `context.timestampIso8601`
+
+ The bucket timestamp of the anomaly in ISO8601 format.
+
+ `context.topInfluencers`
+
+ The list of top influencers. Properties include:
+
+ `influencer_field_name`
+
+ The field name of the influencer.
+
+ `influencer_field_value`
+
+ The entity that influenced, contributed to, or was to blame for the anomaly.
+
+ `score`
+
+ The influencer score. A normalized score between 0-100 which shows the influencer’s overall contribution to the anomalies.
+
+
+
+ `context.topRecords`
+
+ The list of top records. Properties include:
+
+ `actual`
+
+ The actual value for the bucket.
+
+ `by_field_value`
+
+ The value of the by field.
+
+ `field_name`
+
+ Certain functions require a field to operate on, for example, `sum()`. For those functions, this value is the name of the field to be analyzed.
+
+ `function`
+
+ The function in which the anomaly occurs, as specified in the detector configuration. For example, `max`.
+
+ `over_field_name`
+
+ The field used to split the data.
+
+ `partition_field_value`
+
+ The field used to segment the analysis.
+
+ `score`
+
+ A normalized score between 0-100, which is based on the probability of the anomalousness of this record.
+
+ `typical`
+
+ The typical value for the bucket, according to analytical modeling.
+
+
+
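+For example, a notification message that uses some of these variables (shown only as an illustration; adjust it to the variables you need) might look like this:
+
+```
+[{{rule.name}}] Anomaly with score {{context.score}} detected at {{context.timestampIso8601}}.
+
+Top influencers:
+{{#context.topInfluencers}}
+- {{influencer_field_name}}: {{influencer_field_value}} (score {{score}})
+{{/context.topInfluencers}}
+
+Open in Anomaly Explorer: {{context.anomalyExplorerUrl}}
+```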
+
+
+
+
+## Edit an anomaly detection rule
+
+To edit an anomaly detection rule:
+
+1. In your ((observability)) project, go to **AIOps** → **Anomaly detection**.
+1. Expand the job that uses the rule you want to edit.
+1. On the **Job settings** tab, under **Alert rules**, click the rule to edit it.
diff --git a/docs/en/serverless/alerting/alerting-connectors.mdx b/docs/en/serverless/alerting/alerting-connectors.mdx
new file mode 100644
index 0000000000..e8217d7bc0
--- /dev/null
+++ b/docs/en/serverless/alerting/alerting-connectors.mdx
@@ -0,0 +1,23 @@
+* [D3 Security](((kibana-ref))/d3security-action-type.html)
+* [Email](((kibana-ref))/email-action-type.html)
+* [((ibm-r))](((kibana-ref))/resilient-action-type.html)
+* [Index](((kibana-ref))/index-action-type.html)
+* [Jira](((kibana-ref))/jira-action-type.html)
+* [Microsoft Teams](((kibana-ref))/teams-action-type.html)
+* [((opsgenie))](((kibana-ref))/opsgenie-action-type.html)
+* [PagerDuty](((kibana-ref))/pagerduty-action-type.html)
+* [Server log](((kibana-ref))/server-log-action-type.html)
+* [((sn-itom))](((kibana-ref))/servicenow-itom-action-type.html)
+* [((sn-itsm))](((kibana-ref))/servicenow-action-type.html)
+* [((sn-sir))](((kibana-ref))/servicenow-sir-action-type.html)
+* [Slack](((kibana-ref))/slack-action-type.html)
+* [((swimlane))](((kibana-ref))/swimlane-action-type.html)
+* [Torq](((kibana-ref))/torq-action-type.html)
+* [((webhook))](((kibana-ref))/webhook-action-type.html)
+* [xMatters](((kibana-ref))/xmatters-action-type.html)
+
+
+ Some connector types are paid commercial features, while others are free.
+ For a comparison of the Elastic subscription levels, go to
+ [the subscription page](https://www.elastic.co/subscriptions).
+
diff --git a/docs/en/serverless/alerting/alerting.mdx b/docs/en/serverless/alerting/alerting.mdx
new file mode 100644
index 0000000000..326eedb5b9
--- /dev/null
+++ b/docs/en/serverless/alerting/alerting.mdx
@@ -0,0 +1,32 @@
+---
+id: serverlessObservabilityAlerting
+slug: /serverless/observability/alerting
+title: Alerting
+description: Get alerts based on rules you define for detecting complex conditions in your applications and services.
+tags: [ 'serverless', 'observability', 'overview', 'alerting' ]
+---
+
+
+
+Alerting enables you to define _rules_, which detect complex conditions within different apps and trigger actions when those conditions are met. Alerting provides a set of built-in connectors and rules for you to use. This page describes all of these elements and how they operate together.
+
+## Important concepts
+
+Alerting works by running checks on a schedule to detect conditions defined by a rule. You can define rules at different levels (service, environment, transaction) or use custom KQL queries. When a condition is met, the rule tracks it as an _alert_ and responds by triggering one or more _actions_.
+
+Actions typically involve interaction with Elastic services or third-party integrations. enable actions to talk to these services and integrations.
+
+Once you've defined your rules, you can monitor any alerts triggered by these rules in real time, with detailed dashboards that help you quickly identify and troubleshoot any issues that may arise. You can also extend your alerts with notifications via services or third-party incident management systems.
+
+## Alerts page
+
+On the **Alerts** page, the Alerts table provides a snapshot of alerts occurring within the specified time frame. The table includes the alert status, when it was last updated, the reason for the alert, and more.
+
+![Summary of Alerts on the ((observability)) overview page](../images/observability-alerts-overview.png)
+
+You can filter this table by alert status or time period, customize the visible columns, and search for specific alerts (for example, alerts related to a specific service or environment) using KQL. Select **View alert detail** from the **More actions** menu, or click the Reason link for any alert to view it in detail, and you can then either **View in app** or **View rule details**.
+
+## Next steps
+
+*
+*
diff --git a/docs/en/serverless/alerting/create-anomaly-alert-rule.mdx b/docs/en/serverless/alerting/create-anomaly-alert-rule.mdx
new file mode 100644
index 0000000000..3e25bb7fd3
--- /dev/null
+++ b/docs/en/serverless/alerting/create-anomaly-alert-rule.mdx
@@ -0,0 +1,111 @@
+---
+id: serverlessObservabilityCreateAnomalyAlertRule
+slug: /serverless/observability/create-anomaly-alert-rule
+title: Create an APM anomaly rule
+description: Get alerts when either the latency, throughput, or failed transaction rate of a service is abnormal.
+tags: [ 'serverless', 'observability', 'how-to', 'alerting' ]
+---
+
+
+
+import Connectors from './alerting-connectors.mdx'
+
+import Roles from '../partials/roles.mdx'
+
+
+
+You can create an anomaly rule to alert you when either the latency, throughput, or failed transaction rate of a service is abnormal. Anomaly rules can be set at different levels: environment, service, and/or transaction type. Add actions to raise alerts via services or third-party integrations (for example, send an email or create a Jira issue).
+
+![Create rule for APM anomaly alert](../images/alerts-create-apm-anomaly.png)
+
+
+These steps show how to use the **Alerts** UI.
+You can also create an anomaly rule directly from any page within **Applications**. Click the **Alerts and rules** button, and select **Create anomaly rule**. When you create a rule this way, the **Name** and **Tags** fields will be prepopulated but you can still change these.
+
+
+To create your anomaly rule:
+
+1. In your ((observability)) project, go to **Alerts**.
+1. Select **Manage Rules** from the **Alerts** page, and select **Create rule**.
+1. Enter a **Name** for your rule, and any optional **Tags** for more granular reporting (leave blank if unsure).
+1. Select the **APM Anomaly** rule type.
+1. Select the appropriate **Service**, **Type**, and **Environment** (or leave **ALL** to include all options).
+1. Select the desired severity (critical, major, minor, warning) from **Has anomaly with severity**.
+1. Define the interval to check the rule (for example, check every 1 minute).
+1. (Optional) Set up **Actions**.
+1. **Save** your rule.
+
+## Add actions
+
+You can extend your rules with actions that interact with third-party systems, write to logs or indices, or send user notifications. You can add an action to a rule at any time. You can create rules without adding actions, and you can also define multiple actions for a single rule.
+
+To add actions to rules, you must first create a connector for that service (for example, an email or external incident management system), which you can then use for different rules, each with their own action frequency.
+
+
+Connectors provide a central place to store connection information for services and integrations with third party systems.
+The following connectors are available when defining actions for alerting rules:
+
+
+
+For more information on creating connectors, refer to Connectors.
+
+
+
+
+After you select a connector, you must set the action frequency. You can choose to create a **Summary of alerts** on each check interval or on a custom interval. For example, you can send email notifications that summarize the new, ongoing, and recovered alerts every twelve hours.
+
+Alternatively, you can set the action frequency to **For each alert** and specify the conditions each alert must meet for the action to run. For example, you can send an email only when the alert status changes to critical.
+
+![Configure when a rule is triggered](../images/alert-action-frequency.png)
+
+With the **Run when** menu you can choose if an action runs when the threshold for an alert is reached, or when the alert is recovered. For example, you can add a corresponding action for each state to ensure you are alerted when the rule is triggered and also when it recovers.
+
+![Choose between threshold met or recovered](../images/alert-apm-action-frequency-recovered.png)
+
+
+
+
+Use the default notification message or customize it.
+You can add more context to the message by clicking the Add variable icon and selecting from a list of available variables.
+
+![Action variables list](../images/action-variables-popup.png)
+
+The following variables are specific to this rule type.
+You can also specify [variables common to all rules](((kibana-ref))/rule-action-variables.html).
+
+
+ `context.alertDetailsUrl`
+
+ Link to the alert troubleshooting view for further context and details. This will be an empty string if the `server.publicBaseUrl` is not configured.
+
+ `context.environment`
+
+ The environment the alert is created for.
+
+ `context.reason`
+
+ A concise description of the reason for the alert.
+
+ `context.serviceName`
+
+ The service the alert is created for.
+
+ `context.threshold`
+
+ Any trigger value above this value will cause the alert to fire.
+
+ `context.transactionType`
+
+ The transaction type the alert is created for.
+
+ `context.triggerValue`
+
+ The value that breached the threshold and triggered the alert.
+
+ `context.viewInAppUrl`
+
+ Link to the alert source.
+
+
+
+
diff --git a/docs/en/serverless/alerting/create-custom-threshold-alert-rule.mdx b/docs/en/serverless/alerting/create-custom-threshold-alert-rule.mdx
new file mode 100644
index 0000000000..3c9356822d
--- /dev/null
+++ b/docs/en/serverless/alerting/create-custom-threshold-alert-rule.mdx
@@ -0,0 +1,210 @@
+---
+id: serverlessObservabilityCreateCustomThresholdAlertRule
+slug: /serverless/observability/create-custom-threshold-alert-rule
+title: Create a custom threshold rule
+description: Get alerts when an Observability data type reaches a given value.
+tags: [ 'serverless', 'observability', 'how-to', 'alerting' ]
+---
+
+
+
+import Connectors from './alerting-connectors.mdx'
+
+import Roles from '../partials/roles.mdx'
+
+
+
+Create a custom threshold rule to trigger an alert when an ((observability)) data type reaches or exceeds a given value.
+
+1. To access this page, from your project go to **Alerts**.
+1. Click **Manage Rules** → **Create rule**.
+1. Under **Select rule type**, select **Custom threshold**.
+
+![Rule details (custom threshold)](../images/custom-threshold-rule.png)
+
+
+
+## Define rule data
+
+Specify the following settings to define the data the rule applies to:
+
+* **Select a data view:** Click the data view field to search for and select a data view that points to the indices or data streams that you're creating a rule for. You can also create a _new_ data view by clicking **Create a data view**. Refer to [Create a data view](((kibana-ref))/data-views.html) for more on creating data views.
+* **Define query filter (optional):** Use a query filter to narrow down the data that the rule applies to. For example, set a query filter to a specific host name using the query filter `host.name:host-1` to only apply the rule to that host.
+
+
+
+## Set rule conditions
+
+Set the conditions for the rule to detect using aggregations, an equation, and a threshold.
+
+
+
+### Set aggregations
+
+Aggregations summarize your data to make it easier to analyze.
+Set any of the following aggregation types to gather data to create your rule:
+`Average`, `Max`, `Min`, `Cardinality`, `Count`, `Sum,` `Percentile`, or `Rate`.
+For more information about these options, refer to .
+
+For example, to gather the total number of log documents with a log level of `warn`:
+
+1. Set the **Aggregation** to **Count**, and set the **KQL Filter** to `log.level: "warn"`.
+1. Set the threshold to `IS ABOVE 100` to trigger an alert when the number of log documents with a log level of `warn` reaches 100.
+
+
+
+### Set the equation and threshold
+
+Set an equation using your aggregations. Based on the results of your equation, set a threshold to define when to trigger an alert. The equations use basic math or boolean logic. Refer to the following examples for possible use cases.
+
+
+
+### Basic math equation
+
+Add, subtract, multiply, or divide your aggregations to define conditions for alerting.
+
+**Example:**
+Set an equation and threshold to trigger an alert when a metric is above a threshold. For this example, we'll use average CPU usage—the percentage of CPU time spent in states other than `idle` or `IOWait` normalized by the number of CPU cores—and trigger an alert when CPU usage is above a specific percentage. To do this, set the following aggregations, equation, and threshold:
+
+1. Set the following aggregations:
+ * **Aggregation A:** Average `system.cpu.user.pct`
+ * **Aggregation B:** Average `system.cpu.system.pct`
+ * **Aggregation C:** Max `system.cpu.cores`.
+1. Set the equation to `(A + B) / C * 100`
+1. Set the threshold to `IS ABOVE 95` to alert when CPU usage is above 95%.
+
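+For instance, if aggregation A returns `0.70`, aggregation B returns `0.27`, and aggregation C returns `1` (values assumed purely for illustration), the equation evaluates to `(0.70 + 0.27) / 1 * 100 = 97`, which is above `95`, so an alert is triggered.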
+
+
+### Boolean logic
+
+Use conditional operators and comparison operators with your aggregations to define conditions for alerting.
+
+**Example:**
+Set an equation and threshold to trigger an alert when the number of stateful pods differs from the number of desired pods. For this example, we'll use `kubernetes.statefulset.ready` and `kubernetes.statefulset.desired`, and trigger an alert when their values differ. To do this, set the following aggregations, equation, and threshold:
+
+1. Set the following aggregations:
+ * **Aggregation A:** Sum `kubernetes.statefulset.ready`
+ * **Aggregation B:** Sum `kubernetes.statefulset.desired`
+1. Set the equation to `A == B ? 1 : 0`. If A and B are equal, the result is `1`. If they're not equal, the result is `0`.
+1. Set the threshold to `IS BELOW 1` to trigger an alert when the result is `0` and the field values do not match.
+
+
+
+## Preview chart
+
+The preview chart provides a visualization of how many entries match your configuration.
+The shaded area shows the threshold you've set.
+
+
+
+## Group alerts by (optional)
+
+Set one or more **group alerts by** fields for custom threshold rules to perform a composite aggregation against the selected fields.
+When any of these groups match the selected rule conditions, an alert is triggered _per group_.
+
+When you select multiple groupings, the group name is separated by commas.
+
+For example, if you group alerts by the `host.name` and `host.architecture` fields, and there are two hosts (`Host A` and `Host B`) and two architectures (`Architecture A` and `Architecture B`), the composite aggregation forms multiple groups.
+
+If the `Host A, Architecture A` group matches the rule conditions, but the `Host B, Architecture B` group doesn't, one alert is triggered for `Host A, Architecture A`.
+
+If you select one field—for example, `host.name`—and `Host A` matches the conditions but `Host B` doesn't, one alert is triggered for `Host A`.
+If both groups match the conditions, alerts are triggered for both groups.
+
+When you select **Alert me if a group stops reporting data**, the rule is triggered if a group that previously reported metrics does not report them again over the expected time period.
+
+## Add actions
+
+You can extend your rules with actions that interact with third-party systems, write to logs or indices, or send user notifications. You can add an action to a rule at any time. You can create rules without adding actions, and you can also define multiple actions for a single rule.
+
+To add actions to rules, you must first create a connector for that service (for example, an email or external incident management system), which you can then use for different rules, each with their own action frequency.
+
+
+Connectors provide a central place to store connection information for services and integrations with third party systems.
+The following connectors are available when defining actions for alerting rules:
+
+
+
+For more information on creating connectors, refer to Connectors.
+
+
+
+
+After you select a connector, you must set the action frequency.
+You can choose to create a summary of alerts on each check interval or on a custom interval.
+Alternatively, you can set the action frequency such that you choose how often the action runs (for example,
+at each check interval, only when the alert status changes, or at a custom action interval).
+In this case, you must also select the specific threshold condition that affects when actions run: `Alert`, `No Data`, or `Recovered`.
+
+![Configure when a rule is triggered](../images/custom-threshold-run-when.png)
+
+You can also further refine the conditions under which actions run by specifying that actions only run when they match a KQL query or when an alert occurs within a specific time frame:
+
+- **If alert matches query**: Enter a KQL query that defines field-value pairs or query conditions that must be met for notifications to send. The query only searches alert documents in the indices specified for the rule.
+- **If alert is generated during timeframe**: Set timeframe details. Notifications are only sent if alerts are generated within the timeframe you define.
+
+![Configure a conditional alert](../images/logs-threshold-conditional-alert.png)
+
+
+
+
+Use the default notification message or customize it.
+You can add more context to the message by clicking the Add variable icon and selecting from a list of available variables.
+
+![Action variables list](../images/action-variables-popup.png)
+
+The following variables are specific to this rule type.
+You can also specify [variables common to all rules](((kibana-ref))/rule-action-variables.html).
+
+
+ `context.alertDetailsUrl`
+
+ Link to the alert troubleshooting view for further context and details. This will be an empty string if the `server.publicBaseUrl` is not configured.
+
+ `context.cloud`
+
+ The cloud object defined by ECS if available in the source.
+
+ `context.container`
+
+ The container object defined by ECS if available in the source.
+
+ `context.group`
+
+ The object containing groups that are reporting data.
+
+ `context.host`
+
+ The host object defined by ECS if available in the source.
+
+ `context.labels`
+
+ List of labels associated with the entity where this alert triggered.
+
+ `context.orchestrator`
+
+ The orchestrator object defined by ECS if available in the source.
+
+ `context.reason`
+
+ A concise description of the reason for the alert.
+
+ `context.tags`
+
+ List of tags associated with the entity where this alert triggered.
+
+ `context.timestamp`
+
+ A timestamp of when the alert was detected.
+
+ `context.value`
+
+ List of the condition values.
+
+ `context.viewInAppUrl`
+
+ Link to the alert source.
+
+
+
+
diff --git a/docs/en/serverless/alerting/create-elasticsearch-query-alert-rule.mdx b/docs/en/serverless/alerting/create-elasticsearch-query-alert-rule.mdx
new file mode 100644
index 0000000000..b4ef591674
--- /dev/null
+++ b/docs/en/serverless/alerting/create-elasticsearch-query-alert-rule.mdx
@@ -0,0 +1,266 @@
+---
+id: serverlessObservabilityCreateElasticsearchQueryRule
+slug: /serverless/observability/create-elasticsearch-query-rule
+title: Create an Elasticsearch query rule
+description: Get alerts when matches are found during the latest query run.
+tags: [ 'serverless', 'observability', 'how-to', 'alerting' ]
+---
+
+
+
+import Connectors from './alerting-connectors.mdx'
+
+import Roles from '../partials/roles.mdx'
+
+
+
+
+
+The ((es)) query rule type runs a user-configured query, compares the number of
+matches to a configured threshold, and schedules actions to run when the
+threshold condition is met.
+
+1. To access this page, from your project go to **Alerts**.
+1. Click **Manage Rules** → **Create rule**.
+1. Under **Select rule type**, select **((es)) query**.
+
+An ((es)) query rule can be defined using ((es)) Query Domain Specific Language (DSL), ((es)) Query Language (ES|QL), ((kib)) Query Language (KQL), or Lucene.
+
+## Define the conditions
+
+When you create an ((es)) query rule, your choice of query type affects the information you must provide.
+For example:
+
+![Define the condition to detect](../images/alerting-rule-types-es-query-conditions.png)
+{/* NOTE: This is an autogenerated screenshot. Do not edit it directly. */}
+
+1. Define your query
+
+    If you use [query DSL](((ref))/query-dsl.html), you must select an index and a time field, then provide your query.
+    Only the `query`, `fields`, `_source`, and `runtime_mappings` fields are used; other DSL fields are not considered.
+ For example:
+
+ ```sh
+ {
+ "query":{
+ "match_all" : {}
+ }
+ }
+ ```
+
+ If you use [KQL](((kibana-ref))/kuery-query.html) or [Lucene](((kibana-ref))/lucene-query.html), you must specify a data view then define a text-based query.
+ For example, `http.request.referrer: "https://example.com"`.
+
+
+ If you use [ES|QL](((ref))/esql.html), you must provide a source command followed by an optional series of processing commands, separated by pipe characters (|).
+ For example:
+
+ ```sh
+ FROM kibana_sample_data_logs
+ | STATS total_bytes = SUM(bytes) BY host
+ | WHERE total_bytes > 200000
+ | SORT total_bytes DESC
+ | LIMIT 10
+ ```
+
+1. If you use query DSL, KQL, or Lucene, set the group and threshold.
+
+ When
+ : Specify how to calculate the value that is compared to the threshold. The value is calculated by aggregating a numeric field within the time window. The aggregation options are: `count`, `average`, `sum`, `min`, and `max`. When using `count` the document count is used and an aggregation field is not necessary.
+ Over or Grouped Over
+ : Specify whether the aggregation is applied over all documents or split into groups using up to four grouping fields.
+ If you choose to use grouping, it's a [terms](((ref))/search-aggregations-bucket-terms-aggregation.html) or [multi terms aggregation](((ref))/search-aggregations-bucket-multi-terms-aggregation.html); an alert will be created for each unique set of values when it meets the condition.
+ To limit the number of alerts on high cardinality fields, you must specify the number of groups to check against the threshold.
+ Only the top groups are checked.
+
+ Threshold
+ : Defines a threshold value and a comparison operator (`is above`,
+ `is above or equals`, `is below`, `is below or equals`, or `is between`). The value
+ calculated by the aggregation is compared to this threshold.
+
+1. Set the time window, which defines how far back to search for documents.
+
+1. If you use query DSL, KQL, or Lucene, set the number of documents to send to the configured actions when the threshold condition is met.
+
+1. If you use query DSL, KQL, or Lucene, choose whether to avoid alert duplication by excluding matches from the previous run.
+ This option is not available when you use a grouping field.
+
+1. Set the check interval, which defines how often to evaluate the rule conditions.
+   Generally, this value should be smaller than the time window to avoid gaps in detection.
+
+## Test your query
+
+Use the **Test query** feature to verify that your query is valid.
+
+If you use query DSL, KQL, or Lucene, the query runs against the selected indices using the configured time window.
+The number of documents that match the query is displayed.
+For example:
+
+![Test ((es)) query returns number of matches when valid](../images/alerting-rule-types-es-query-valid.png)
+{/* NOTE: This is an autogenerated screenshot. Do not edit it directly. */}
+
+ If you use an ES|QL query, a table is displayed. For example:
+
+![Test ES|QL query returns a table when valid](../images/alerting-rule-types-esql-query-valid.png)
+
+If the query is not valid, an error occurs.
+
+## Add actions
+
+{/* TODO: Decide whether to use boiler plate text, or the text from the source docs for this rule. */}
+
+You can optionally send notifications when the rule conditions are met and when they are no longer met.
+In particular, this rule type supports:
+
+* alert summaries
+* actions that run when the query is matched
+* recovery actions that run when the rule conditions are no longer met
+
+For each action, you must choose a connector, which provides connection information for a service or third party integration.
+
+
+Connectors provide a central place to store connection information for services and integrations with third party systems.
+The following connectors are available when defining actions for alerting rules:
+
+
+
+For more information on creating connectors, refer to Connectors.
+
+
+
+
+After you select a connector, you must set the action frequency. You can choose to create a **Summary of alerts** on each check interval or on a custom interval. For example, you can send email notifications that summarize the new, ongoing, and recovered alerts at a custom interval:
+
+![UI for defining alert summary action in an ((es)) query rule](../images/alerting-es-query-rule-action-summary.png)
+{/* NOTE: This is an autogenerated screenshot. Do not edit it directly. */}
+
+Alternatively, you can set the action frequency to **For each alert** and specify the conditions each alert must meet for the action to run.
+
+With the **Run when** menu you can choose how often the action runs (at each check interval, only when the alert status changes, or at a custom action interval).
+You must also choose an action group, which indicates whether the action runs when the query is matched or when the alert is recovered.
+Each connector supports a specific set of actions for each action group.
+For example:
+
+![UI for defining a recovery action](../images/alerting-es-query-rule-action-query-matched.png)
+{/* NOTE: This is an autogenerated screenshot. Do not edit it directly. */}
+
+You can further refine the conditions under which actions run by specifying that actions only run when they match a KQL query or when an alert occurs within a specific time frame.
+
+
+
+
+Use the default notification message or customize it.
+You can add more context to the message by clicking the Add variable icon and selecting from a list of available variables.
+
+![Action variables list](../images/action-variables-popup.png)
+
+The following variables are specific to this rule type.
+You can also specify [variables common to all rules](((kibana-ref))/rule-action-variables.html).
+
+
+ `context.conditions`
+
+ A string that describes the threshold condition. Example:
+ `count greater than 4`.
+
+ `context.date`
+
+ The date, in ISO format, that the rule met the condition.
+ Example: `2022-02-03T20:29:27.732Z`.
+
+ `context.hits`
+
+ The most recent documents that matched the query. Using the
+ [Mustache](https://mustache.github.io/) template array syntax, you can iterate
+ over these hits to get values from the ((es)) documents into your actions.
+
+ For example, the message in an email connector action might contain:
+
+ ```
+ Elasticsearch query rule '{{rule.name}}' is active:
+
+ {{#context.hits}}
+ Document with {{_id}} and hostname {{_source.host.name}} has
+ {{_source.system.memory.actual.free}} bytes of memory free
+ {{/context.hits}}
+ ```
+
+ The documents returned by `context.hits` include the [`_source`](((ref))/mapping-source-field.html) field.
+ If the ((es)) query search API's [`fields`](((ref))/search-fields.html#search-fields-param) parameter is used, documents will also return the `fields` field,
+ which can be used to access any runtime fields defined by the [`runtime_mappings`](((ref))/runtime-search-request.html) parameter.
+ For example:
+
+ {/* NOTCONSOLE */}
+ ```
+ {{#context.hits}}
+ timestamp: {{_source.@timestamp}}
+ day of the week: {{fields.day_of_week}} [^1]
+ {{/context.hits}}
+ ```
+ [^1]: The `fields` parameter here is used to access the `day_of_week` runtime field.
+
+ As the [`fields`](((ref))/search-fields.html#search-fields-response) response always returns an array of values for each field,
+ the [Mustache](https://mustache.github.io/) template array syntax is used to iterate over these values in your actions.
+ For example:
+
+ ```
+ {{#context.hits}}
+ Labels:
+ {{#fields.labels}}
+ - {{.}}
+ {{/fields.labels}}
+ {{/context.hits}}
+ ```
+ {/* NOTCONSOLE */}
+
+ `context.link`
+
+ Link to Discover and show the records that triggered the alert.
+
+ `context.message`
+
+ A message for the alert. Example:
+ `rule 'my es-query' is active:`
+ `- Value: 2`
+ `- Conditions Met: Number of matching documents is greater than 1 over 5m`
+ `- Timestamp: 2022-02-03T20:29:27.732Z`
+
+ `context.title`
+
+ A title for the alert. Example:
+ `rule term match alert query matched`.
+
+ `context.value`
+
+ The value that met the threshold condition.
+
+
+
+
+
+
+
+## Handling multiple matches of the same document
+
+By default, **Exclude matches from previous run** is turned on and the rule checks
+for duplication of document matches across multiple runs. If you configure the
+rule with a schedule interval smaller than the time window and a document
+matches a query in multiple runs, it is alerted on only once.
+
+The rule uses the timestamp of the matches to avoid alerting on the same match
+multiple times. The timestamp of the latest match is used for evaluating the
+rule conditions when the rule runs. Only matches between the latest timestamp
+from the previous run and the current run are considered.
+
+Suppose you have a rule configured to run every minute. The rule uses a time
+window of 1 hour and checks if there are more than 99 matches for the query. The
+((es)) query rule type does the following:
+
+{/* [cols="3*<"] */}
+| | | |
+|---|---|---|
+| `Run 1 (0:00)` | Rule finds 113 matches in the last hour: `113 > 99` | Rule is active and user is alerted. |
+| `Run 2 (0:01)` | Rule finds 127 matches in the last hour. 105 of the matches are duplicates that were already alerted on previously, so you actually have 22 matches: `22 !> 99` | No alert. |
+| `Run 3 (0:02)` | Rule finds 159 matches in the last hour. 88 of the matches are duplicates that were already alerted on previously, so you actually have 71 matches: `71 !> 99` | No alert. |
+| `Run 4 (0:03)` | Rule finds 190 matches in the last hour. 71 of them are duplicates that were already alerted on previously, so you actually have 119 matches: `119 > 99` | Rule is active and user is alerted. |
diff --git a/docs/en/serverless/alerting/create-error-count-threshold-alert-rule.mdx b/docs/en/serverless/alerting/create-error-count-threshold-alert-rule.mdx
new file mode 100644
index 0000000000..eac849e1f7
--- /dev/null
+++ b/docs/en/serverless/alerting/create-error-count-threshold-alert-rule.mdx
@@ -0,0 +1,162 @@
+---
+id: serverlessObservabilityCreateErrorCountThresholdAlertRule
+slug: /serverless/observability/create-error-count-threshold-alert-rule
+title: Create an error count threshold rule
+description: Get alerts when the number of errors in a service exceeds a defined threshold.
+tags: [ 'serverless', 'observability', 'how-to', 'alerting' ]
+---
+
+
+
+import Connectors from './alerting-connectors.mdx'
+
+import Roles from '../partials/roles.mdx'
+
+
+
+Create an error count threshold rule to alert you when the number of errors in a service exceeds a defined threshold. Threshold rules can be set at different levels: environment, service, transaction type, and/or transaction name.
+
+![Create rule for error count threshold alert](../images/alerts-create-rule-error-count.png)
+
+
+These steps show how to use the **Alerts** UI.
+You can also create an error count threshold rule directly from any page within **Applications**. Click the **Alerts and rules** button, and select **Create error count rule**. When you create a rule this way, the **Name** and **Tags** fields will be prepopulated but you can still change these.
+
+
+To create your error count threshold rule:
+
+1. In your ((observability)) project, go to **Alerts**.
+1. Select **Manage Rules** from the **Alerts** page, and select **Create rule**.
+1. Enter a **Name** for your rule, and any optional **Tags** for more granular reporting (leave blank if unsure).
+1. Select the **Error count threshold** rule type from the APM use case.
+1. Select the appropriate **Service**, **Environment**, and **Error Grouping Key** (or leave **ALL** to include all options). Alternatively, you can select **Use KQL Filter** and enter a KQL expression to limit the scope of your rule.
+1. Enter the error threshold in **Is Above** (defaults to 25 errors).
+1. Define the period to be assessed in **For the last** (defaults to last 5 minutes).
+1. Choose how to **Group alerts by**. Every unique value will create an alert.
+1. Define the interval to check the rule (for example, check every 1 minute).
+1. (Optional) Set up **Actions**.
+1. **Save** your rule.
+
+## Add actions
+
+You can extend your rules with actions that interact with third-party systems, write to logs or indices, or send user notifications. You can add an action to a rule at any time. You can create rules without adding actions, and you can also define multiple actions for a single rule.
+
+To add actions to rules, you must first create a connector for that service (for example, an email or external incident management system), which you can then use for different rules, each with their own action frequency.
+
+
+Connectors provide a central place to store connection information for services and integrations with third party systems.
+The following connectors are available when defining actions for alerting rules:
+
+
+
+For more information on creating connectors, refer to Connectors.
+
+
+
+
+After you select a connector, you must set the action frequency. You can choose to create a **Summary of alerts** on each check interval or on a custom interval. For example, you can send email notifications that summarize the new, ongoing, and recovered alerts every twelve hours.
+
+Alternatively, you can set the action frequency to **For each alert** and specify the conditions each alert must meet for the action to run. For example, you can send an email only when the alert status changes to critical.
+
+![Configure when a rule is triggered](../images/alert-action-frequency.png)
+
+With the **Run when** menu you can choose if an action runs when the threshold for an alert is reached, or when the alert is recovered. For example, you can add a corresponding action for each state to ensure you are alerted when the rule is triggered and also when it recovers.
+
+![Choose between threshold met or recovered](../images/alert-apm-action-frequency-recovered.png)
+
+
+
+
+Use the default notification message or customize it.
+You can add more context to the message by clicking the Add variable icon and selecting from a list of available variables.
+
+![Action variables list](../images/action-variables-popup.png)
+
+The following variables are specific to this rule type.
+You can also specify [variables common to all rules](((kibana-ref))/rule-action-variables.html).
+
+
+ `context.alertDetailsUrl`
+
+ Link to the alert troubleshooting view for further context and details. This will be an empty string if the `server.publicBaseUrl` is not configured.
+
+ `context.environment`
+
+  The environment the alert is created for.
+
+ `context.errorGroupingKey`
+
+ The error grouping key the alert is created for.
+
+ `context.errorGroupingName`
+
+ The error grouping name the alert is created for.
+
+ `context.interval`
+
+ The length and unit of time period where the alert conditions were met.
+
+ `context.reason`
+
+ A concise description of the reason for the alert.
+
+ `context.serviceName`
+
+ The service the alert is created for.
+
+ `context.threshold`
+
+ Any trigger value above this value will cause the alert to fire.
+
+ `context.transactionName`
+
+ The transaction name the alert is created for.
+
+ `context.triggerValue`
+
+ The value that breached the threshold and triggered the alert.
+
+ `context.viewInAppUrl`
+
+ Link to the alert source.
+
+
+
+
+
+
+## Example
+
+The error count threshold alert triggers when the number of errors in a service exceeds a defined threshold. Because some errors are more important than others, this guide will focus on a specific error group ID.
+
+Before continuing, identify the service name, environment name, and error group ID that you’d like to create an error count threshold rule for.
+{/* The easiest way to find an error group ID is to select the service that you’re interested in and navigating to the Errors tab. // is there a Serverless equivalent? */}
+
+This guide will create an alert for an error group ID based on the following criteria:
+
+* Service: `{your_service.name}`
+* Environment: `{your_service.environment}`
+* Error Grouping Key: `{your_error.ID}`
+* Error count is above 25 errors for the last five minutes
+* Group alerts by `service.name` and `service.environment`
+* Check every 1 minute
+* Send the alert via email to the site reliability team
+
+From any page in **Applications**, select **Alerts and rules** → **Create threshold rule** → **Error count rule**. Change the name of the alert (if you wish), but do not edit the tags.
+
+Based on the criteria above, define the following rule details:
+
+* **Service**: `{your_service.name}`
+* **Environment**: `{your_service.environment}`
+* **Error Grouping Key**: `{your_error.ID}`
+* **Is above:** `25 errors`
+* **For the last:** `5 minutes`
+* **Group alerts by:** `service.name` `service.environment`
+* **Check every:** `1 minute`
+
+Next, select the **Email** connector and click **Create a connector**. Fill out the required details: sender, host, port, etc., and select **Save**.
+
+A default message is provided as a starting point for your alert. You can use the Mustache template syntax (`{{variable}}`) to pass additional alert values at the time a condition is detected to an action. A list of available variables can be accessed by clicking the Add variable icon .
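+
+For example, a notification message along these lines uses the variables documented above to summarize the breach (a sketch; adjust the wording and variables to fit your workflow):
+
+```
+Error count rule '{{rule.name}}' is active:
+
+- Service: {{context.serviceName}} ({{context.environment}})
+- Error group: {{context.errorGroupingName}}
+- Error count: {{context.triggerValue}} (threshold: {{context.threshold}})
+- Reason: {{context.reason}}
+- Alert details: {{context.alertDetailsUrl}}
+```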
+
+Select **Save**. The alert has been created and is now active!
+
diff --git a/docs/en/serverless/alerting/create-failed-transaction-rate-threshold-alert-rule.mdx b/docs/en/serverless/alerting/create-failed-transaction-rate-threshold-alert-rule.mdx
new file mode 100644
index 0000000000..a5aebcbbd7
--- /dev/null
+++ b/docs/en/serverless/alerting/create-failed-transaction-rate-threshold-alert-rule.mdx
@@ -0,0 +1,157 @@
+---
+id: serverlessObservabilityCreateFailedTransactionRateThresholdAlertRule
+slug: /serverless/observability/create-failed-transaction-rate-threshold-alert-rule
+title: Create a failed transaction rate threshold rule
+description: Get alerts when the rate of transaction errors in a service exceeds a defined threshold.
+tags: [ 'serverless', 'observability', 'how-to', 'alerting' ]
+---
+
+
+
+import Connectors from './alerting-connectors.mdx'
+
+import Roles from '../partials/roles.mdx'
+
+
+
+You can create a failed transaction rate threshold rule to alert you when the rate of transaction errors in a service exceeds a defined threshold. Threshold rules can be set at different levels: environment, service, transaction type, and/or transaction name. Add actions to raise alerts via services or third-party integrations such as email, Slack, or Jira.
+
+![Create rule for failed transaction rate threshold alert](../images/alerts-create-rule-failed-transaction-rate.png)
+
+
+These steps show how to use the **Alerts** UI.
+You can also create a failed transaction rate threshold rule directly from any page within **Applications**. Click the **Alerts and rules** button, and select **Create threshold rule** and then **Failed transaction rate**. When you create a rule this way, the **Name** and **Tags** fields will be prepopulated but you can still change these.
+
+
+To create your failed transaction rate threshold rule:
+
+1. In your ((observability)) project, go to **Alerts**.
+1. Select **Manage Rules** from the **Alerts** page, and select **Create rule**.
+1. Enter a **Name** for your rule, and any optional **Tags** for more granular reporting (leave blank if unsure).
+1. Select the **Failed transaction rate threshold** rule type from the APM use case.
+1. Select the appropriate **Service**, **Type**, **Environment** and **Name** (or leave **ALL** to include all options). Alternatively, you can select **Use KQL Filter** and enter a KQL expression to limit the scope of your rule.
+1. Enter the failure rate in **Is Above** (defaults to 30%).
+1. Define the period to be assessed in **For the last** (defaults to last 5 minutes).
+1. Choose how to **Group alerts by**. Every unique value will create an alert.
+1. Define the interval to check the rule (for example, check every 1 minute).
+1. (Optional) Set up **Actions**.
+1. **Save** your rule.
+
+## Add actions
+
+You can extend your rules with actions that interact with third-party systems, write to logs or indices, or send user notifications. You can add an action to a rule at any time. You can create rules without adding actions, and you can also define multiple actions for a single rule.
+
+To add actions to rules, you must first create a connector for that service (for example, an email or external incident management system), which you can then use for different rules, each with their own action frequency.
+
+
+Connectors provide a central place to store connection information for services and integrations with third party systems.
+The following connectors are available when defining actions for alerting rules:
+
+
+
+For more information on creating connectors, refer to Connectors.
+
+
+
+
+After you select a connector, you must set the action frequency. You can choose to create a **Summary of alerts** on each check interval or on a custom interval. For example, you can send email notifications that summarize the new, ongoing, and recovered alerts every twelve hours.
+
+Alternatively, you can set the action frequency to **For each alert** and specify the conditions each alert must meet for the action to run. For example, you can send an email only when the alert status changes to critical.
+
+![Configure when a rule is triggered](../images/alert-action-frequency.png)
+
+With the **Run when** menu you can choose if an action runs when the threshold for an alert is reached, or when the alert is recovered. For example, you can add a corresponding action for each state to ensure you are alerted when the rule is triggered and also when it recovers.
+
+![Choose between threshold met or recovered](../images/alert-apm-action-frequency-recovered.png)
+
+
+
+
+Use the default notification message or customize it.
+You can add more context to the message by clicking the Add variable icon and selecting from a list of available variables.
+
+![Action variables list](../images/action-variables-popup.png)
+
+The following variables are specific to this rule type.
+You can also specify [variables common to all rules](((kibana-ref))/rule-action-variables.html).
+
+
+ `context.alertDetailsUrl`
+
+ Link to the alert troubleshooting view for further context and details. This will be an empty string if the `server.publicBaseUrl` is not configured.
+
+ `context.environment`
+
+  The environment the alert is created for.
+
+ `context.interval`
+
+ The length and unit of time period where the alert conditions were met.
+
+ `context.reason`
+
+ A concise description of the reason for the alert.
+
+ `context.serviceName`
+
+ The service the alert is created for.
+
+ `context.threshold`
+
+ Any trigger value above this value will cause the alert to fire.
+
+ `context.transactionName`
+
+ The transaction name the alert is created for.
+
+ `context.transactionType`
+
+ The transaction type the alert is created for.
+
+ `context.triggerValue`
+
+ The value that breached the threshold and triggered the alert.
+
+ `context.viewInAppUrl`
+
+ Link to the alert source.
+
+
+
+
+
+## Example
+
+The failed transaction rate threshold alert triggers when the number of transaction errors in a service exceeds a defined threshold.
+
+Before continuing, identify the service name, environment name, and transaction type that you’d like to create a failed transaction rate threshold rule for.
+
+This guide will create an alert based on the following criteria:
+
+* Service: `{your_service.name}`
+* Transaction: `{your_transaction.name}`
+* Environment: `{your_service.environment}`
+* Error rate is above 30% for the last five minutes
+* Group alerts by `service.name` and `service.environment`
+* Check every 1 minute
+* Send the alert via email to the site reliability team
+
+From any page in **Applications**, select **Alerts and rules** → **Create threshold rule** → **Failed transaction rate**. Change the name of the alert (if you wish), but do not edit the tags.
+
+Based on the criteria above, define the following rule details:
+
+* **Service**: `{your_service.name}`
+* **Type**: `{your_transaction.name}`
+* **Environment**: `{your_service.environment}`
+* **Is above:** `30%`
+* **For the last:** `5 minutes`
+* **Group alerts by:** `service.name` `service.environment`
+* **Check every:** `1 minute`
+
+Next, select the **Email** connector and click **Create a connector**. Fill out the required details: sender, host, port, etc., and select **Save**.
+
+A default message is provided as a starting point for your alert. You can use the Mustache template syntax (`{{variable}}`) to pass additional alert values at the time a condition is detected to an action. A list of available variables can be accessed by clicking the Add variable icon .
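+
+For example, a message built from the variables documented above might read as follows (a sketch; adapt it as needed):
+
+```
+Failed transaction rate rule '{{rule.name}}' is active for {{context.serviceName}} ({{context.transactionType}}):
+
+- Failed transaction rate: {{context.triggerValue}} (threshold: {{context.threshold}})
+- Reason: {{context.reason}}
+- Alert details: {{context.alertDetailsUrl}}
+```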
+
+Select **Save**. The alert has been created and is now active!
+
+
diff --git a/docs/en/serverless/alerting/create-inventory-threshold-alert-rule.mdx b/docs/en/serverless/alerting/create-inventory-threshold-alert-rule.mdx
new file mode 100644
index 0000000000..cf5713e547
--- /dev/null
+++ b/docs/en/serverless/alerting/create-inventory-threshold-alert-rule.mdx
@@ -0,0 +1,192 @@
+---
+id: serverlessObservabilityCreateInventoryThresholdAlertRule
+slug: /serverless/observability/create-inventory-threshold-alert-rule
+title: Create an inventory rule
+description: Get alerts when the infrastructure inventory exceeds a defined threshold.
+tags: [ 'serverless', 'observability', 'how-to', 'alerting' ]
+---
+
+
+
+import Connectors from './alerting-connectors.mdx'
+
+import Roles from '../partials/roles.mdx'
+
+
+
+
+
+Based on the resources listed on the **Inventory** page within the ((infrastructure-app)),
+you can create a threshold rule to notify you when a metric has reached or exceeded a value for a specific
+resource or a group of resources within your infrastructure.
+
+Additionally, each rule can be defined using multiple
+conditions that combine metrics and thresholds to create precise notifications and reduce false positives.
+
+1. To access this page, go to **((observability))** -> **Infrastructure**.
+1. On the **Inventory** page or the **Metrics Explorer** page, click **Alerts and rules** -> **Infrastructure**.
+1. Select **Create inventory rule**.
+
+
+
+When you select **Create inventory alert**, the parameters you configured on the **Inventory** page will automatically
+populate the rule. You can use the Inventory first to view which nodes in your infrastructure you'd
+like to be notified about and then quickly create a rule in just a few clicks.
+
+
+
+
+
+## Inventory conditions
+
+Conditions for each rule can be applied to specific metrics relating to the inventory type you select.
+You can choose the aggregation type, the metric, and by including a warning threshold value, you can be
+alerted on multiple threshold values based on severity scores. When creating the rule, you can still get
+notified if no data is returned for the specific metric or if the rule fails to query ((es)).
+
+In this example, Kubernetes Pods is the selected inventory type. The conditions state that you will receive
+a critical alert for any pods within the `ingress-nginx` namespace with a memory usage of 95% or above
+and a warning alert if memory usage is 90% or above.
+
+![Inventory rule](../images/inventory-alert.png)
+
+{/* Does the preview capability exist when creating this rule in serverless? I do not see it in the UI. */}
+
+Before creating a rule, you can preview whether the conditions would have triggered the alert in the last
+hour, day, week, or month.
+
+![Preview rules](../images/alert-preview.png)
+
+
+
+## Add actions
+
+You can extend your rules with actions that interact with third-party systems, write to logs or indices, or send user notifications. You can add an action to a rule at any time. You can create rules without adding actions, and you can also define multiple actions for a single rule.
+
+To add actions to rules, you must first create a connector for that service (for example, an email or external incident management system), which you can then use for different rules, each with their own action frequency.
+
+
+Connectors provide a central place to store connection information for services and integrations with third party systems.
+The following connectors are available when defining actions for alerting rules:
+
+
+
+For more information on creating connectors, refer to Connectors.
+
+
+
+
+After you select a connector, you must set the action frequency. You can choose to create a summary of alerts on each check interval or on a custom interval. For example, send email notifications that summarize the new, ongoing, and recovered alerts each hour:
+
+![Action types](../images/action-alert-summary.png)
+{/* NOTE: This is an autogenerated screenshot. Do not edit it directly. */}
+
+Alternatively, you can set the action frequency such that you choose how often the action runs (for example, at each check interval, only when the alert status changes, or at a custom action interval). In this case, you define precisely when the alert is triggered by selecting a specific
+threshold condition: `Alert`, `Warning`, or `Recovered` (a value that was once above a threshold has now dropped below it).
+
+![Configure when an alert is triggered](../images/inventory-threshold-run-when-selection.png)
+{/* NOTE: This is an autogenerated screenshot. Do not edit it directly. */}
+
+You can also further refine the conditions under which actions run by specifying that actions only run when they match a KQL query or when an alert occurs within a specific time frame:
+
+- **If alert matches query**: Enter a KQL query that defines field-value pairs or query conditions that must be met for notifications to send. The query only searches alert documents in the indices specified for the rule.
+- **If alert is generated during timeframe**: Set timeframe details. Notifications are only sent if alerts are generated within the timeframe you define.
+
+![Configure a conditional alert](../images/conditional-alerts.png)
+
+
+
+
+Use the default notification message or customize it.
+You can add more context to the message by clicking the Add variable icon and selecting from a list of available variables.
+
+![Action variables list](../images/action-variables-popup.png)
+
+The following variables are specific to this rule type.
+You can also specify [variables common to all rules](((kibana-ref))/rule-action-variables.html).
+
+
+ `context.alertDetailsUrl`
+
+ Link to the alert troubleshooting view for further context and details. This will be an empty string if the `server.publicBaseUrl` is not configured.
+
+ `context.alertState`
+
+ Current state of the alert.
+
+ `context.cloud`
+
+ The cloud object defined by ECS if available in the source.
+
+ `context.container`
+
+ The container object defined by ECS if available in the source.
+
+ `context.group`
+
+ Name of the group reporting data.
+
+ `context.host`
+
+ The host object defined by ECS if available in the source.
+
+ `context.labels`
+
+ List of labels associated with the entity where this alert triggered.
+
+ `context.metric`
+
+ The metric name in the specified condition. Usage: (`ctx.metric.condition0`, `ctx.metric.condition1`, and so on).
+
+ `context.orchestrator`
+
+ The orchestrator object defined by ECS if available in the source.
+
+ `context.originalAlertState`
+
+ The state of the alert before it recovered. This is only available in the recovery context.
+
+ `context.originalAlertStateWasALERT`
+
+ Boolean value of the state of the alert before it recovered. This can be used for template conditions. This is only available in the recovery context.
+
+ `context.originalAlertStateWasWARNING`
+
+ Boolean value of the state of the alert before it recovered. This can be used for template conditions. This is only available in the recovery context.
+
+ `context.reason`
+
+ A concise description of the reason for the alert.
+
+ `context.tags`
+
+ List of tags associated with the entity where this alert triggered.
+
+ `context.threshold`
+
+ The threshold value of the metric for the specified condition. Usage: (`ctx.threshold.condition0`, `ctx.threshold.condition1`, and so on)
+
+ `context.timestamp`
+
+ A timestamp of when the alert was detected.
+
+ `context.value`
+
+ The value of the metric in the specified condition. Usage: (`ctx.value.condition0`, `ctx.value.condition1`, and so on).
+
+ `context.viewInAppUrl`
+
+ Link to the alert source.
+
+
+
+
+
+
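+As a sketch, a notification message for a rule with a single condition might reference the first condition's metric, value, and threshold using the usage forms documented above (add `condition1`, `condition2`, and so on for additional conditions):
+
+```
+{{context.reason}}
+
+- Metric: {{ctx.metric.condition0}}
+- Value: {{ctx.value.condition0}} (threshold: {{ctx.threshold.condition0}})
+- Alert details: {{context.alertDetailsUrl}}
+```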
+
+## Settings
+
+With infrastructure threshold rules, it's not possible to set an explicit index pattern as part of the configuration. The index pattern
+is instead inferred from **Metrics indices** on the Settings page of the ((infrastructure-app)).
+
+With each execution of the rule check, the **Metrics indices** setting is checked, but it is not stored when the rule is created.
diff --git a/docs/en/serverless/alerting/create-latency-threshold-alert-rule.mdx b/docs/en/serverless/alerting/create-latency-threshold-alert-rule.mdx
new file mode 100644
index 0000000000..e1e4089f5d
--- /dev/null
+++ b/docs/en/serverless/alerting/create-latency-threshold-alert-rule.mdx
@@ -0,0 +1,160 @@
+---
+id: serverlessObservabilityCreateLatencyThresholdAlertRule
+slug: /serverless/observability/create-latency-threshold-alert-rule
+title: Create a latency threshold rule
+description: Get alerts when the latency of a specific transaction type in a service exceeds a defined threshold.
+tags: [ 'serverless', 'observability', 'how-to', 'alerting' ]
+---
+
+
+
+import Connectors from './alerting-connectors.mdx'
+
+import Roles from '../partials/roles.mdx'
+
+
+
+You can create a latency threshold rule to alert you when the latency of a specific transaction type in a service exceeds a defined threshold. Threshold rules can be set at different levels: environment, service, transaction type, and/or transaction name. Add actions to raise alerts via services or third-party integrations such as email, Slack, or Jira.
+
+![Create rule for APM latency threshold alert](../images/alerts-create-rule-apm-latency-threshold.png)
+
+
+These steps show how to use the **Alerts** UI.
+You can also create a latency threshold rule directly from any page within **Applications**. Click the **Alerts and rules** button, and select **Create threshold rule** and then **Latency**. When you create a rule this way, the **Name** and **Tags** fields will be prepopulated but you can still change these.
+
+
+To create your latency threshold rule:
+
+1. In your ((observability)) project, go to **Alerts**.
+1. Select **Manage Rules** from the **Alerts** page, and select **Create rule**.
+1. Enter a **Name** for your rule, and any optional **Tags** for more granular reporting (leave blank if unsure).
+1. Select the **Latency threshold** rule type from the APM use case.
+1. Select the appropriate **Service**, **Type**, **Environment** and **Name** (or leave **ALL** to include all options). Alternatively, you can select **Use KQL Filter** and enter a KQL expression to limit the scope of your rule.
+1. Define the threshold and period:
+ * **When**: Choose between `Average`, `95th percentile`, or `99th percentile`.
+ * **Is Above**: Enter a time in milliseconds (defaults to 1500ms).
+    * **For the last**: Define the period to be assessed (defaults to last 5 minutes).
+1. Choose how to **Group alerts by**. Every unique value will create an alert.
+1. Define the interval to check the rule (for example, check every 1 minute).
+1. (Optional) Set up **Actions**.
+1. **Save** your rule.
+
+## Add actions
+
+You can extend your rules with actions that interact with third-party systems, write to logs or indices, or send user notifications. You can add an action to a rule at any time. You can create rules without adding actions, and you can also define multiple actions for a single rule.
+
+To add actions to rules, you must first create a connector for that service (for example, an email or external incident management system), which you can then use for different rules, each with their own action frequency.
+
+
+Connectors provide a central place to store connection information for services and integrations with third party systems.
+The following connectors are available when defining actions for alerting rules:
+
+
+
+For more information on creating connectors, refer to Connectors.
+
+
+
+
+After you select a connector, you must set the action frequency. You can choose to create a **Summary of alerts** on each check interval or on a custom interval. For example, you can send email notifications that summarize the new, ongoing, and recovered alerts every twelve hours.
+
+Alternatively, you can set the action frequency to **For each alert** and specify the conditions each alert must meet for the action to run. For example, you can send an email only when the alert status changes to critical.
+
+![Configure when a rule is triggered](../images/alert-action-frequency.png)
+
+With the **Run when** menu you can choose if an action runs when the threshold for an alert is reached, or when the alert is recovered. For example, you can add a corresponding action for each state to ensure you are alerted when the rule is triggered and also when it recovers.
+
+![Choose between threshold met or recovered](../images/alert-apm-action-frequency-recovered.png)
+
+
+
+
+Use the default notification message or customize it.
+You can add more context to the message by clicking the Add variable icon and selecting from a list of available variables.
+
+![Action variables list](../images/action-variables-popup.png)
+
+The following variables are specific to this rule type.
+You can also specify [variables common to all rules](((kibana-ref))/rule-action-variables.html).
+
+
+ `context.alertDetailsUrl`
+
+ Link to the alert troubleshooting view for further context and details. This will be an empty string if the `server.publicBaseUrl` is not configured.
+
+ `context.environment`
+
+  The environment the alert is created for.
+
+ `context.interval`
+
+ The length and unit of time period where the alert conditions were met.
+
+ `context.reason`
+
+ A concise description of the reason for the alert.
+
+ `context.serviceName`
+
+ The service the alert is created for.
+
+ `context.threshold`
+
+ Any trigger value above this value will cause the alert to fire.
+
+ `context.transactionName`
+
+ The transaction name the alert is created for.
+
+ `context.transactionType`
+
+ The transaction type the alert is created for.
+
+ `context.triggerValue`
+
+ The value that breached the threshold and triggered the alert.
+
+ `context.viewInAppUrl`
+
+ Link to the alert source.
+
+
+
+
+
+
+## Example
+
+The latency threshold alert triggers when the latency of a specific transaction type in a service exceeds a defined threshold.
+
+Before continuing, identify the service name, environment name, and transaction type that you’d like to create a latency threshold rule for.
+
+This guide will create an alert based on the following criteria:
+
+* Service: `{your_service.name}`
+* Transaction: `{your_transaction.name}`
+* Environment: `{your_service.environment}`
+* Average latency is above 1500ms for the last 5 minutes
+* Group alerts by `service.name` and `service.environment`
+* Check every 1 minute
+* Send the alert via email to the site reliability team
+
+From any page in **Applications**, select **Alerts and rules** → **Create threshold rule** → **Latency threshold**. Change the name of the alert (if you wish), but do not edit the tags.
+
+Based on the criteria above, define the following rule details:
+
+* **Service**: `{your_service.name}`
+* **Type**: `{your_transaction.name}`
+* **Environment**: `{your_service.environment}`
+* **When:** `Average`
+* **Is above:** `1500ms`
+* **For the last:** `5 minutes`
+* **Group alerts by:** `service.name` `service.environment`
+* **Check every:** `1 minute`
+
+Next, select the **Email** connector and click **Create a connector**. Fill out the required details: sender, host, port, etc., and select **Save**.
+
+A default message is provided as a starting point for your alert. You can use the Mustache template syntax (`{{variable}}`) to pass additional alert values at the time a condition is detected to an action. A list of available variables can be accessed by selecting the add variable button.
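+
+For example, a notification message might combine the variables documented above like this (a sketch; adjust it to your needs):
+
+```
+Latency threshold rule '{{rule.name}}' is active:
+
+- Service: {{context.serviceName}} ({{context.transactionType}})
+- Latency: {{context.triggerValue}} (threshold: {{context.threshold}})
+- Reason: {{context.reason}}
+- Alert details: {{context.alertDetailsUrl}}
+```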
+
+Select **Save**. The alert has been created and is now active!
+
diff --git a/docs/en/serverless/alerting/create-manage-rules.mdx b/docs/en/serverless/alerting/create-manage-rules.mdx
new file mode 100644
index 0000000000..f6774766cf
--- /dev/null
+++ b/docs/en/serverless/alerting/create-manage-rules.mdx
@@ -0,0 +1,142 @@
+---
+id: serverlessObservabilityCreateRules
+slug: /serverless/observability/create-manage-rules
+title: Create and manage rules
+description: Create and manage rules for alerting when conditions are met.
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+import Roles from '../partials/roles.mdx'
+
+
+
+Alerting enables you to define _rules_, which detect complex conditions within different apps and trigger actions when those conditions are met. Alerting provides a set of built-in connectors and rules for you to use.
+
+## Observability rules
+
+Learn more about Observability rules and how to create them:
+
+
+
+
+ AIOps
+ Anomaly detection
+ Anomalies match specific conditions.
+
+
+ APM
+ APM anomaly
+ The latency, throughput, or failed transaction rate of a service is abnormal.
+
+
+ Observability
+ Custom threshold
+ An Observability data type reaches or exceeds a given value.
+
+
+ Stack
+ ((es)) query
+ Matches are found during the latest query run.
+
+
+ APM
+ Error count threshold
+ The number of errors in a service exceeds a defined threshold.
+
+
+ APM
+ Failed transaction rate threshold
+ The rate of transaction errors in a service exceeds a defined threshold.
+
+
+ Metrics
+ Inventory
+ The infrastructure inventory exceeds a defined threshold.
+
+
+ APM
+ Latency threshold
+ The latency of a specific transaction type in a service exceeds a defined threshold.
+
+
+ SLO
+ SLO burn rate rule
+ The burn rate is above a defined threshold.
+
+
+
+## Creating rules and alerts
+
+You start by defining the rule and how often it should be evaluated. You can extend these rules by adding an appropriate action (for example, send an email or create an issue) to be triggered when the rule conditions are met. These actions are defined within each rule and implemented by the appropriate connector for that action (for example, Slack or Jira). You can create any rules from scratch using the **Manage Rules** page, or you can create specific rule types from their respective UIs and benefit from some of the details being pre-filled (for example, Name and Tags).
+
+* For APM alert types, you can select **Alerts and rules** and create rules directly from the **Services**, **Traces**, and **Dependencies** UIs.
+
+* For SLO alert types, from the **SLOs** page, open the **More actions** menu for an SLO and select **Create new alert rule**. Alternatively, when you create a new SLO, the **Create new SLO burn rate alert rule** checkbox is enabled by default and prompts you to create an SLO burn rate rule when you save the SLO.
+
+{/*
+Clarify available Logs rule
+*/}
+
+After a rule is created, you can open the **More actions** menu and select **Edit rule** to check or change the definition, and/or add or modify actions.
+
+![Edit rule (failed transaction rate)](../images/alerts-edit-rule.png)
+
+From the action menu you can also:
+
+* Disable or delete rule
+* Clone rule
+* Snooze rule notifications
+* Run rule (without waiting for next scheduled check)
+* Update API keys
+
+## View rule details
+
+Click on an individual rule on the **((rules-app))** page to view details including the rule name, status, definition, execution history, related alerts, and more.
+
+![Rule details (APM anomaly)](../images/alerts-detail-apm-anomaly.png)
+
+A rule can have one of the following responses:
+
+`failed`
+ : The rule ran with errors.
+
+`succeeded`
+ : The rule ran without errors.
+
+`warning`
+ : The rule ran with some non-critical errors.
+
+## Snooze and disable rules
+
+The rule listing enables you to quickly snooze, disable, enable, or delete individual rules.
+
+{/* ![Use the rule status dropdown to enable or disable an individual rule](images/create-and-manage-rules/user-alerting-individual-enable-disable.png) */}
+
+When you snooze a rule, the rule checks continue to run on a schedule but the
+alert will not trigger any actions. You can snooze for a specified period of
+time, indefinitely, or schedule single or recurring downtimes.
+
+{/* ![Snooze notifications for a rule](images/create-and-manage-rules/user-alerting-snooze-panel.png) */}
+
+When a rule is in a snoozed state, you can cancel or change the duration of
+this state.
+
+ To temporarily suppress notifications for _all_ rules, create a .
+
+{/* Remove tech preview? */}
+
+## Import and export rules
+
+To import and export rules, use ((saved-objects-app)).
+
+Rules are disabled on export.
+You are prompted to re-enable the rule on successful import.
+
+{/* Can you import / export rules? */}
diff --git a/docs/en/serverless/alerting/create-slo-burn-rate-alert-rule.mdx b/docs/en/serverless/alerting/create-slo-burn-rate-alert-rule.mdx
new file mode 100644
index 0000000000..78ec1968d6
--- /dev/null
+++ b/docs/en/serverless/alerting/create-slo-burn-rate-alert-rule.mdx
@@ -0,0 +1,134 @@
+---
+id: serverlessObservabilityCreateSloBurnRateAlertRule
+slug: /serverless/observability/create-slo-burn-rate-alert-rule
+title: Create an SLO burn rate rule
+description: Get alerts when the SLO failure rate is too high over a defined period of time.
+tags: [ 'serverless', 'observability', 'how-to', 'alerting' ]
+---
+
+
+
+import Connectors from './alerting-connectors.mdx'
+
+import Roles from '../partials/roles.mdx'
+
+
+
+Create an SLO burn rate rule to get alerts when the burn rate exceeds a defined threshold for two different lookback periods: a long period and a short period that is 1/12th of the long period. For example, if your long lookback period is one hour, your short lookback period is five minutes.
+
+Choose which SLO to monitor and then define multiple burn rate windows with appropriate severity. For each period, the burn rate is computed as the error rate divided by the error budget. When the burn rates for both periods surpass the threshold, an alert is triggered. Add actions to raise alerts via services or third-party integrations such as email, Slack, or Jira.
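+
+For example, assuming an SLO with a 99% target, the error budget is 1%. If 5% of events in a lookback window are bad, the burn rate for that window is `5% / 1% = 5`: the error budget is being consumed five times faster than the SLO allows, and an alert is triggered if both the long and short windows exceed the threshold you set.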
+
+![Create rule for failed transaction rate threshold](../images/slo-alerts-create-rule.png)
+
+
+These steps show how to use the **Alerts** UI. You can also create an SLO burn rate rule directly from **Observability** → **SLOs**.
+Click the more options icon () to the right of the SLO you want to add a burn rate rule for, and select **Create new alert rule** from the menu.
+
+When you use the UI to create an SLO, a default SLO burn rate alert rule is created automatically.
+The burn rate rule will use the default configuration and no connector.
+You must configure a connector if you want to receive alerts for SLO breaches.
+
+
+To create an SLO burn rate rule:
+
+1. In your ((observability)) project, go to **Alerts**.
+1. Select **Manage Rules** from the **Alerts** page, and select **Create rule**.
+1. Enter a **Name** for your rule, and any optional **Tags** for more granular reporting (leave blank if unsure).
+1. Select **SLO burn rate** from the **Select rule type** list.
+1. Select the **SLO** you want to monitor.
+1. Define multiple burn rate windows for each **Action Group** (defaults to 4 windows but you can edit):
+ * **Lookback (hours)**: Enter the lookback period for this window. A shorter period equal to 1/12th of this period will be used for faster recovery.
+ * **Burn rate threshold**: Enter a burn rate for this window.
+ * **Action Group**: Select a severity for this window.
+1. Define the interval to check the rule (for example, check every 1 minute).
+1. (Optional) Set up **Actions**.
+1. **Save** your rule.
+
+## Add actions
+
+You can extend your rules with actions that interact with third-party systems, write to logs or indices, or send user notifications. You can add an action to a rule at any time. You can create rules without adding actions, and you can also define multiple actions for a single rule.
+
+To add actions to rules, you must first create a connector for that service (for example, an email or external incident management system), which you can then use for different rules, each with their own action frequency.
+
+
+Connectors provide a central place to store connection information for services and integrations with third party systems.
+The following connectors are available when defining actions for alerting rules:
+
+
+
+For more information on creating connectors, refer to Connectors.
+
+
+
+
+After you select a connector, you must set the action frequency. You can choose to create a **Summary of alerts** on each check interval or on a custom interval. For example, you can send email notifications that summarize the new, ongoing, and recovered alerts every twelve hours.
+
+Alternatively, you can set the action frequency to **For each alert** and specify the conditions each alert must meet for the action to run. For example, you can send an email only when the alert status changes to critical.
+
+![Configure when a rule is triggered](../images/alert-action-frequency.png)
+
+With the **Run when** menu you can choose if an action runs for a specific severity (critical, high, medium, low), or when the alert is recovered. For example, you can add a corresponding action for each severity you want an alert for, and also for when the alert recovers.
+
+![Choose between severity or recovered](../images/slo-action-frequency.png)
+
+
+
+Use the default notification message or customize it.
+You can add more context to the message by clicking the Add variable icon and selecting from a list of available variables.
+
+![Action variables list](../images/action-variables-popup.png)
+
+The following variables are specific to this rule type.
+You can also specify [variables common to all rules](((kibana-ref))/rule-action-variables.html).
+
+
+ `context.alertDetailsUrl`
+
+ Link to the alert troubleshooting view for further context and details. This will be an empty string if the `server.publicBaseUrl` is not configured.
+
+ `context.burnRateThreshold`
+
+ The burn rate threshold value.
+
+ `context.longWindow`
+
+ The window duration with the associated burn rate value.
+
+ `context.reason`
+
+ A concise description of the reason for the alert.
+
+ `context.shortWindow`
+
+ The window duration with the associated burn rate value.
+
+ `context.sloId`
+
+ The SLO unique identifier.
+
+ `context.sloInstanceId`
+
+ The SLO instance ID.
+
+ `context.sloName`
+
+ The SLO name.
+
+ `context.timestamp`
+
+ A timestamp of when the alert was detected.
+
+ `context.viewInAppUrl`
+
+ The url to the SLO details page to help with further investigation.
+
+
+
+
+
+## Next steps
+
+Learn how to view alerts and triage SLO burn rate breaches:
+
+*
+*
diff --git a/docs/en/serverless/alerting/rate-aggregation.mdx b/docs/en/serverless/alerting/rate-aggregation.mdx
new file mode 100644
index 0000000000..f3c0a9aed1
--- /dev/null
+++ b/docs/en/serverless/alerting/rate-aggregation.mdx
@@ -0,0 +1,54 @@
+---
+id: serverlessObservabilityRateAggregation
+slug: /serverless/observability/rateAggregation
+title: Rate aggregation
+description: Analyze the rate at which a specific field changes over time.
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+You can use a rate aggregation to analyze the rate at which a specific field changes over time.
+This type of aggregation is useful when you want to analyze fields like counters.
+
+For example, imagine you have a counter field called `restarts` that increments each time a service restarts.
+You can use rate aggregation to get an alert if the service restarts more than X times within a specific time window (for example, per day).
+
+## How rates are calculated
+
+Rates used in alerting rules are calculated by comparing the maximum value of the field in the previous bucket to the maximum value of the field in the current bucket and then dividing the result by the number of seconds in the selected interval.
+For example, if the value of the `restarts` field increases, the rate would be calculated as:
+
+`(max_value_in_current_bucket - max_value_in_previous_bucket)/interval_in_seconds`
+
+In this example, let’s assume you have one document per bucket with the following data:
+
+
+
+```json
+{
+  "timestamp": 0,
+  "restarts": 0
+}
+
+{
+  "timestamp": 60000,
+  "restarts": 1
+}
+```
+
+Let’s assume the timestamp is a UNIX timestamp in milliseconds,
+and we started counting on Thursday, January 1, 1970 12:00:00 AM.
+In that case, the rate will be calculated as follows:
+
+`(max_value_in_current_bucket - max_value_in_previous_bucket)/interval_in_seconds`, where:
+
+* `max_value_in_current_bucket` [now-1m → now]: 1
+* `max_value_in_previous_bucket` [now-2m → now-1m]: 0
+* `interval_in_seconds`: 60
+
+The rate calculation would be: `(1 - 0) / 60 = 0.0166666666667`
+
+If you want to alert when the rate of restarts is above 1 within a 1-minute window, you would set the threshold above `0.0166666666667`.
+
+The calculation you need to use depends on the interval that's selected.
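+
+For example, assuming you instead want to alert when there are more than 24 restarts within a 1-day window, the interval is 86,400 seconds, so you would set the threshold above `24 / 86400 = 0.000277777778`.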
diff --git a/docs/en/serverless/alerting/triage-slo-burn-rate-breaches.mdx b/docs/en/serverless/alerting/triage-slo-burn-rate-breaches.mdx
new file mode 100644
index 0000000000..28f645f3ff
--- /dev/null
+++ b/docs/en/serverless/alerting/triage-slo-burn-rate-breaches.mdx
@@ -0,0 +1,47 @@
+---
+id: serverlessObservabilitySloBurnRateBreaches
+slug: /serverless/observability/triage-slo-burn-rate-breaches
+title: Triage SLO burn rate breaches
+description: Triage SLO burn rate breaches to avoid exhausting your error budget and violating your SLO.
+tags: [ 'serverless', 'observability', 'how-to', 'alerting' ]
+---
+
+
+
+SLO burn rate breaches occur when the percentage of bad events over a specified time period exceeds the threshold set in your .
+When this happens, you are at risk of exhausting your error budget and violating your SLO.
+
+To triage issues quickly, go to the alert details page:
+
+1. In your Observability project, go to **Alerts** (or open the SLO and click **Alerts**).
+2. From the Alerts table, click the
+icon next to the alert and select **View alert details**.
+
+The alert details page shows information about the alert, including when the alert was triggered,
+the duration of the alert, the source SLO, and the rule that triggered the alert.
+You can follow the links to navigate to the source SLO or rule definition.
+
+Explore charts on the page to learn more about the SLO breach:
+
+![Alert details for SLO burn rate breach](../images/slo-burn-rate-breach.png)
+
+* The first chart shows the burn rate during the time range when the alert was active.
+The line indicates how close the SLO came to breaching the threshold.
+* The next chart shows the alerts history over the last 30 days.
+It shows the number of alerts that were triggered and the average time it took to recover after a breach.
+* Both timelines are annotated to show when the threshold was breached.
+You can hover over an alert icon to see the timestamp of the alert.
+
+The number, duration, and frequency of these breaches over time gives you an indication of how severely the service is degrading so that you can focus on high severity issues first.
+
+
+ The contents of the alert details page may vary depending on the type of SLI that's defined in the SLO.
+
+
+After investigating the alert, you may want to:
+
+* Click **Snooze the rule** to snooze notifications for a specific time period or indefinitely.
+* Click the icon and select **Add to case** to add the alert to a new or existing case. To learn more, refer to .
+* Click the icon and select **Mark as untracked**.
+When an alert is marked as untracked, actions are no longer generated.
+You can choose to move active alerts to this state when you disable or delete rules.
diff --git a/docs/en/serverless/alerting/triage-threshold-breaches.mdx b/docs/en/serverless/alerting/triage-threshold-breaches.mdx
new file mode 100644
index 0000000000..8c46defe83
--- /dev/null
+++ b/docs/en/serverless/alerting/triage-threshold-breaches.mdx
@@ -0,0 +1,51 @@
+---
+id: serverlessObservabilityThresholdBreaches
+slug: /serverless/observability/triage-threshold-breaches
+title: Triage threshold breaches
+description: Triage threshold breaches on the alert details page.
+tags: [ 'serverless', 'observability', 'how-to', 'alerting' ]
+---
+
+Threshold breaches occur when an ((observability)) data type reaches or exceeds the threshold set in your .
+For example, you might have a custom threshold rule that triggers an alert when the total number of log documents with a log level of `error` reaches 100.
+
+To triage issues quickly, go to the alert details page:
+
+1. In your Observability project, go to **Alerts**.
+2. From the Alerts table, click the
+icon next to the alert and select **View alert details**.
+
+The alert details page shows information about the alert, including when the alert was triggered,
+the duration of the alert, and the last status update.
+If there is a "group by" field specified in the rule, the page also includes the source.
+You can follow the links to navigate to the rule definition.
+
+Explore charts on the page to learn more about the threshold breach:
+
+![Alert details for log threshold breach](../images/log-threshold-breach.png)
+
+
+* The page includes a chart for each condition specified in the rule.
+These charts help you understand when the breach occurred and its severity.
+* If your rule is intended to detect log threshold breaches
+(that is, it has a single condition that uses a count aggregation),
+you can run a log rate analysis, assuming you have the required license.
+Running a log rate analysis is useful for detecting significant dips or spikes in the number of logs.
+Notice that you can adjust the baseline and deviation, and then run the analysis again.
+For more information about using the log rate analysis feature,
+refer to the [AIOps Labs](((kibana-ref))/xpack-ml-aiops.html#log-rate-analysis) documentation.
+* The page may also include an alerts history chart that shows the number of triggered alerts per day for the last 30 days.
+This chart is currently only available for rules that specify a single condition.
+* Timelines on the page are annotated to show when the threshold was breached.
+You can hover over an alert icon to see the timestamp of the alert.
+
+Analyze these charts to better understand when the breach started, its current
+state, and how the issue is trending.
+
+After investigating the alert, you may want to:
+
+* Click **Snooze the rule** to snooze notifications for a specific time period or indefinitely.
+* Click the icon and select **Add to case** to add the alert to a new or existing case. To learn more, refer to .
+* Click the icon and select **Mark as untracked**.
+When an alert is marked as untracked, actions are no longer generated.
+You can choose to move active alerts to this state when you disable or delete rules.
diff --git a/docs/en/serverless/alerting/view-alerts.mdx b/docs/en/serverless/alerting/view-alerts.mdx
new file mode 100644
index 0000000000..4f8d3bcbd7
--- /dev/null
+++ b/docs/en/serverless/alerting/view-alerts.mdx
@@ -0,0 +1,121 @@
+---
+id: serverlessObservabilityViewAlerts
+slug: /serverless/observability/view-alerts
+title: View alerts
+description: Track and manage alerts for your services and applications.
+tags: [ 'serverless', 'observability', 'how-to', 'alerting']
+---
+
+
+
+import Roles from '../partials/roles.mdx'
+
+
+
+You can track and manage alerts for your applications and SLOs from the **Alerts** page. You can filter this view by alert status or time period, or search for specific alerts using KQL. Manage your alerts by adding them to cases or viewing them within the respective UIs.
+
+{/* Is this a page or dashboard? */}
+
+![Alerts page](../images/observability-alerts-view.png)
+
+## Filter alerts
+
+To help you get started with your analysis faster, use the KQL bar to create structured queries using
+[((kib)) Query Language](((kibana-ref))/kuery-query.html).
+{/* TO-DO: Fix example
+For example, `kibana.alert.rule.name : <>`.
+*/}
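+For example, to show only active alerts generated by a specific rule, you could search for `kibana.alert.status : "active" and kibana.alert.rule.name : "my-latency-rule"` (where `my-latency-rule` is a placeholder rule name).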
+
+You can use the time filter to define a specific date and time range.
+By default, this filter is set to search for the last 15 minutes.
+
+You can also filter by alert status using the buttons below the KQL bar.
+By default, this filter is set to **Show all** alerts, but you can filter to show only active, recovered, or untracked alerts.
+
+## View alert details
+
+There are a few ways to inspect the details for a specific alert.
+
+From the **Alerts** table, you can click on a specific alert to open the alert detail flyout to view a summary of the alert without leaving the page.
+There you'll see the current status of the alert, its duration, and when it was last updated.
+To help you determine what caused the alert, you can view the expected and actual threshold values, and the rule that produced the alert.
+
+![Alerts detail (APM anomaly)](../images/alert-details-flyout.png)
+
+An alert can have one of the following statuses:
+
+`active`
+ : The conditions for the rule are met and actions should be generated according to the notification settings.
+
+`flapping`
+ : The alert is switching repeatedly between active and recovered states.
+
+`recovered`
+ : The conditions for the rule are no longer met and recovery actions should be generated.
+
+`untracked`
+ : The corresponding rule is disabled or you've marked the alert as untracked. To mark the alert as untracked, go to the **Alerts** table, click the icon to expand the _More actions_ menu, and click **Mark as untracked**.
+ When an alert is marked as untracked, actions are no longer generated.
+ You can choose to move active alerts to this state when you disable or delete rules.
+
+
+The flapping state is possible only if you have enabled alert flapping detection.
+Go to the **Alerts** page and click **Manage Rules** to navigate to the ((observability)) **((rules-app))** page.
+Click **Settings** then set the look back window and threshold that are used to determine whether alerts are flapping.
+For example, you can specify that the alert must change status at least 6 times in the last 10 runs.
+If the rule has actions that run when the alert status changes, those actions are suppressed while the alert is flapping.
+
+
+{/* ![View alert details flyout on the Alerts page](images/view-observability-alerts/-observability-view-alert-details.png) */}
+
+To further inspect the rule:
+
+* From the alert detail flyout, click **View rule details**.
+* From the **Alerts** table, click the icon and select **View rule details**.
+
+To view the alert in the app that triggered it:
+
+* From the alert detail flyout, click **View in app**.
+* From the **Alerts** table, click the icon.
+
+## Customize the alerts table
+
+Use the toolbar buttons in the upper-left of the alerts table to customize the columns you want displayed:
+
+* **Columns**: Reorder the columns.
+* **_x_ fields sorted**: Sort the table by one or more columns.
+* **Fields**: Select the fields to display in the table.
+
+For example, click **Fields** and choose the `Maintenance Windows` field.
+If an alert was affected by a maintenance window, its identifier appears in the new column.
+For more information about how maintenance windows affect alert notifications, refer to .
+
+{/* ![Alerts table with toolbar buttons highlighted](images/view-observability-alerts/-observability-alert-table-toolbar-buttons.png) */}
+
+You can also use the toolbar buttons in the upper-right to customize the display options or view the table in full-screen mode.
+
+## Add alerts to cases
+
+From the **Alerts** table, you can add one or more alerts to a case.
+Click the icon to add the alert to a new or existing case.
+You can add an unlimited number of alerts from any rule type.
+
+
+Each case can have a maximum of 1,000 alerts.
+
+
+### Add an alert to a new case
+
+To add an alert to a new case:
+
+1. Select **Add to new case**.
+1. Enter a case name, add relevant tags, and include a case description.
+1. Under **External incident management system**, select a connector. If you've previously added one, that connector displays as the default selection. Otherwise, the default setting is `No connector selected`.
+1. After you've completed all of the required fields, click **Create case**. A notification message confirms you successfully created the case. To view the case details, click the notification link or go to the Cases page.
+
+### Add an alert to an existing case
+
+To add an alert to an existing case:
+
+1. Select **Add to existing case**.
+1. Select the case to which you want to attach the alert. A confirmation message displays.
diff --git a/docs/en/serverless/apm-agents/apm-agents-aws-lambda-functions.mdx b/docs/en/serverless/apm-agents/apm-agents-aws-lambda-functions.mdx
new file mode 100644
index 0000000000..233fe5e574
--- /dev/null
+++ b/docs/en/serverless/apm-agents/apm-agents-aws-lambda-functions.mdx
@@ -0,0 +1,46 @@
+---
+id: serverlessObservabilityApmAgentsAwsLambdaFunctions
+slug: /serverless/observability/apm-agents-aws-lambda-functions
+title: AWS Lambda functions
+description: Use Elastic APM to monitor your AWS Lambda functions.
+tags: [ 'serverless', 'observability', 'overview' ]
+---
+
+
+
+
+Elastic APM lets you monitor your AWS Lambda functions.
+The natural integration of distributed tracing into your AWS Lambda functions provides insights into each function's execution and runtime behavior as well as its relationships and dependencies to other services.
+
+
+
+## AWS Lambda architecture
+
+{/* comes from sandbox.elastic.dev/test-books/apm/lambda/aws-lambda-arch.mdx */}
+AWS Lambda uses a special execution model to provide a scalable, on-demand compute service for code execution. In particular, AWS freezes the execution environment of a lambda function when no active requests are being processed. This execution model poses additional requirements on APM in the context of AWS Lambda functions:
+
+1. To avoid data loss, APM data collected by APM agents needs to be flushed before the execution environment of a lambda function is frozen.
+1. Flushing APM data must be fast so as not to impact the response times of lambda function requests.
+
+To accomplish the above, Elastic APM agents instrument AWS Lambda functions and dispatch APM data via an [AWS Lambda extension](https://docs.aws.amazon.com/lambda/latest/dg/using-extensions.html).
+
+Normally, during the execution of a Lambda function, there's only a single language process running in the AWS Lambda execution environment. With an AWS Lambda extension, Lambda users run a _second_ process alongside their main service/application process.
+
+![image showing data flow from lambda function, to extension, to the managed intake service](../images/apm-agents-aws-lambda-functions-architecture.png)
+
+By using an AWS Lambda extension, Elastic APM agents can send data to a local Lambda extension process, and that process will forward data on to the managed intake service asynchronously. The Lambda extension ensures that any potential latency between the Lambda function and the managed intake service instance will not cause latency in the request flow of the Lambda function itself.
+
+## Setup
+
+To get started with monitoring AWS Lambda functions, refer to the APM agent documentation:
+
+* [Monitor AWS Lambda Node.js functions](((apm-node-ref))/lambda.html)
+* [Monitor AWS Lambda Python functions](((apm-py-ref))/lambda-support.html)
+* [Monitor AWS Lambda Java functions](((apm-java-ref))/aws-lambda.html)
+
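+Regardless of language, the APM agent and the Lambda extension are typically configured through environment variables on the Lambda function itself. The following is only a rough sketch with placeholder values; the exact variables, layers, and per-language setup are described in the agent documentation linked above.
+
+```bash
+# Placeholder values: replace with your project's APM endpoint and an API key.
+ELASTIC_APM_LAMBDA_APM_SERVER="https://<your-apm-endpoint>"   # where the extension forwards APM data
+ELASTIC_APM_API_KEY="<your-api-key>"                          # authorizes requests to the managed intake service
+```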
+
+ The APM agent documentation states that you can use either an APM secret token or API key to authorize requests to the managed intake service. **However, when sending data to a project, you _must_ use an API key**.
+
+ Read more about API keys in .
+
+
diff --git a/docs/en/serverless/apm-agents/apm-agents-elastic-apm-agents.mdx b/docs/en/serverless/apm-agents/apm-agents-elastic-apm-agents.mdx
new file mode 100644
index 0000000000..a8018be983
--- /dev/null
+++ b/docs/en/serverless/apm-agents/apm-agents-elastic-apm-agents.mdx
@@ -0,0 +1,56 @@
+---
+id: serverlessObservabilityApmAgents
+slug: /serverless/observability/apm-agents-elastic-apm-agents
+title: Elastic APM agents
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'overview' ]
+---
+
+
+
+import Roles from '../partials/roles.mdx'
+
+
+
+import Go from '../transclusion/apm/guide/about/go.mdx'
+import Java from '../transclusion/apm/guide/about/java.mdx'
+import Net from '../transclusion/apm/guide/about/net.mdx'
+import Node from '../transclusion/apm/guide/about/node.mdx'
+import Php from '../transclusion/apm/guide/about/php.mdx'
+import Python from '../transclusion/apm/guide/about/python.mdx'
+import Ruby from '../transclusion/apm/guide/about/ruby.mdx'
+
+Elastic APM agents automatically measure application performance and track errors.
+They offer built-in support for popular frameworks and technologies, and provide easy-to-use APIs that allow you to instrument any application.
+
+Elastic APM agents are built and maintained by Elastic. While they are similar, different programming languages have different nuances and requirements. Select your preferred language below to learn more about how each agent works.
+
+
+
+
+
+
+
+
+
+
+
+## Minimum supported versions
+
+The following versions of Elastic APM agents are supported:
+
+| Agent name | Agent version |
+|---|---|
+| **APM AWS Lambda extension** | ≥`1.x` |
+| **Go agent** | ≥`1.x` |
+| **Java agent** | ≥`1.x` |
+| **.NET agent** | ≥`1.x` |
+| **Node.js agent** | ≥`4.x` |
+| **PHP agent** | ≥`1.x` |
+| **Python agent** | ≥`6.x` |
+| **Ruby agent** | ≥`3.x` |
+
+
+Some recently added features may require newer agent versions than those listed above.
+In these instances, the required APM agent versions will be documented with the feature.
+
diff --git a/docs/en/serverless/apm-agents/apm-agents-opentelemetry-collect-metrics.mdx b/docs/en/serverless/apm-agents/apm-agents-opentelemetry-collect-metrics.mdx
new file mode 100644
index 0000000000..f7501bd4a5
--- /dev/null
+++ b/docs/en/serverless/apm-agents/apm-agents-opentelemetry-collect-metrics.mdx
@@ -0,0 +1,62 @@
+---
+id: serverlessObservabilityApmAgentsOtelCollectMetrics
+slug: /serverless/observability/apm-agents-opentelemetry-collect-metrics
+title: Collect metrics
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+
+
+When collecting metrics, please note that the [`DoubleValueRecorder`](https://www.javadoc.io/doc/io.opentelemetry/opentelemetry-api/latest/io/opentelemetry/api/metrics/DoubleValueRecorder.html)
+and [`LongValueRecorder`](https://www.javadoc.io/doc/io.opentelemetry/opentelemetry-api/latest/io/opentelemetry/api/metrics/LongValueObserver.html) metrics are not yet supported.
+
+
+Here's an example of how to capture business metrics from a Java application.
+
+```java
+// initialize metric
+Meter meter = GlobalMetricsProvider.getMeter("my-frontend");
+DoubleCounter orderValueCounter = meter.doubleCounterBuilder("order_value").build();
+
+public void createOrder(HttpServletRequest request) {
+
+ // create order in the database
+ ...
+ // increment business metrics for monitoring
+ orderValueCounter.add(orderPrice);
+}
+```
+
+See the [OpenTelemetry Metrics API](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md)
+for more information.
+
+
+
+## Verify OpenTelemetry metrics data
+
+Use **Discover** to validate that metrics are successfully reported to your project.
+
+1. Open your Observability project.
+1. Go to **Discover**, and select the **Logs Explorer** tab.
+1. Click **All logs** → **Data Views** then select **APM**.
+1. Filter the data to only show documents with metrics: `processor.name :"metric"`
+1. Narrow your search with a known OpenTelemetry field. For example, if you have an `order_value` field, add `order_value: *` to your search to return
+ only OpenTelemetry metrics documents.
+
+
+
+## Visualize
+
+Use **Lens** to create visualizations for OpenTelemetry metrics. Lens enables you to build visualizations by dragging and dropping data fields. It makes smart visualization suggestions for your data, allowing you to switch between visualization types.
+
+To get started with a new Lens visualization:
+
+1. In your ((observability)) project, go to **Visualizations**.
+1. Click **Create new visualization**.
+1. Select **Lens**.
+
+For more information on using Lens, refer to the [Lens documentation](((kibana-ref))/lens.html).
+
diff --git a/docs/en/serverless/apm-agents/apm-agents-opentelemetry-limitations.mdx b/docs/en/serverless/apm-agents/apm-agents-opentelemetry-limitations.mdx
new file mode 100644
index 0000000000..1200b13035
--- /dev/null
+++ b/docs/en/serverless/apm-agents/apm-agents-opentelemetry-limitations.mdx
@@ -0,0 +1,53 @@
+---
+id: serverlessObservabilityApmAgentsOtelLimitations
+slug: /serverless/observability/apm-agents-opentelemetry-limitations
+title: Limitations
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'overview' ]
+---
+
+
+
+## OpenTelemetry traces
+
+* Traces of applications using `messaging` semantics might be wrongly displayed as `transactions` in the Applications UI, while they should be considered `spans` (see issue [#7001](https://github.com/elastic/apm-server/issues/7001)).
+* Stack traces are not available in spans.
+* The "Time Spent by Span Type" chart is not available in APM views (see issue [#5747](https://github.com/elastic/apm-server/issues/5747)).
+
+
+
+## OpenTelemetry metrics
+
+* Host metrics are not visible in the **Applications** UI when using the OpenTelemetry Collector host metrics receiver (see issue [#5310](https://github.com/elastic/apm-server/issues/5310)).
+
+
+ Even though metrics do not show up in the **Applications** view,
+ the metrics are available in your Observability project and can be visualized using **Dashboards**.
+ See for more information about visualizing OpenTelemetry metrics.
+
+
+
+
+## OpenTelemetry logs
+
+* The OpenTelemetry logs intake via Elastic is in technical preview.
+* The application logs data stream (`app_logs`) has dynamic mapping disabled. This means the automatic detection and mapping of new fields is disabled (see issue [#9093](https://github.com/elastic/apm-server/issues/9093)).
+
+
+
+## OpenTelemetry Protocol (OTLP)
+
+Elastic supports both the
+[(OTLP/gRPC)](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md#otlpgrpc) and
+[(OTLP/HTTP)](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md#otlphttp) protocols
+with a ProtoBuf payload. Elastic does not yet support JSON encoding for OTLP/HTTP.
+
+
+
+## OpenTelemetry Collector exporter for Elastic
+
+The [OpenTelemetry Collector exporter for Elastic](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/elasticsearchexporter#legacy-opentelemetry-collector-exporter-for-elastic)
+has been deprecated and replaced by native support for the OpenTelemetry protocol (OTLP) in Elastic Observability. To learn more, see [migration](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/elasticsearchexporter#migration).
+
+The [OpenTelemetry Collector exporter for Elastic](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/elasticsearchexporter)
+(which is different from the legacy exporter mentioned above) is not intended to be used with Elastic APM and Elastic Observability. Use instead.
diff --git a/docs/en/serverless/apm-agents/apm-agents-opentelemetry-opentelemetry-apisdk-with-elastic-apm-agents.mdx b/docs/en/serverless/apm-agents/apm-agents-opentelemetry-opentelemetry-apisdk-with-elastic-apm-agents.mdx
new file mode 100644
index 0000000000..f6d059e76f
--- /dev/null
+++ b/docs/en/serverless/apm-agents/apm-agents-opentelemetry-opentelemetry-apisdk-with-elastic-apm-agents.mdx
@@ -0,0 +1,37 @@
+---
+id: serverlessObservabilityApmAgentsOtelApisdkWithElasticApmAgents
+slug: /serverless/observability/apm-agents-opentelemetry-opentelemetry-apisdk-with-elastic-apm-agents
+title: OpenTelemetry API/SDK with Elastic APM agents
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+import Roles from '../partials/roles.mdx'
+
+
+
+Use the OpenTelemetry API/SDKs with Elastic APM agents.
+Supported Elastic APM agents translate OpenTelemetry API calls to Elastic APM API calls.
+This allows you to reuse your existing instrumentation to create Elastic APM transactions and spans.
+
+
+If you'd like to use OpenTelemetry to send data directly to Elastic instead,
+see OpenTelemetry native support.
+
+
+See the relevant Elastic APM agent documentation to get started:
+
+* [Java](((apm-java-ref))/opentelemetry-bridge.html)
+* [.NET](((apm-dotnet-ref))/opentelemetry-bridge.html)
+* [Node.js](((apm-node-ref))/opentelemetry-bridge.html)
+* [Python](((apm-py-ref))/opentelemetry-bridge.html)
+
+
+
+## Next steps
+
+* Collect metrics
+* Learn about the limitations of this integration
+
diff --git a/docs/en/serverless/apm-agents/apm-agents-opentelemetry-opentelemetry-native-support.mdx b/docs/en/serverless/apm-agents/apm-agents-opentelemetry-opentelemetry-native-support.mdx
new file mode 100644
index 0000000000..10a861617c
--- /dev/null
+++ b/docs/en/serverless/apm-agents/apm-agents-opentelemetry-opentelemetry-native-support.mdx
@@ -0,0 +1,167 @@
+---
+id: serverlessObservabilityApmAgentsOtelNativeSupport
+slug: /serverless/observability/apm-agents-opentelemetry-opentelemetry-native-support
+title: OpenTelemetry native support
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'overview' ]
+---
+
+
+
+Elastic natively supports the OpenTelemetry protocol (OTLP).
+This means trace data and metrics collected from your applications and infrastructure can
+be sent directly to Elastic.
+
+* Send data to Elastic from an OpenTelemetry collector
+* Send data to Elastic from an OpenTelemetry agent
+
+
+
+## Send data from an OpenTelemetry collector
+
+Connect your OpenTelemetry collector instances to Elastic ((observability)) using the OTLP exporter:
+
+```yaml
+receivers: [^1]
+ # ...
+ otlp:
+
+processors: [^2]
+ # ...
+ memory_limiter:
+ check_interval: 1s
+ limit_mib: 2000
+ batch:
+
+exporters:
+ logging:
+ loglevel: warn [^3]
+ otlp/elastic: [^4]
+ # Elastic https endpoint without the "https://" prefix
+    endpoint: "${ELASTIC_APM_SERVER_ENDPOINT}" [^5] [^7]
+ headers:
+ # Elastic API key
+      Authorization: "ApiKey ${ELASTIC_APM_API_KEY}" [^6] [^7]
+
+service:
+ pipelines:
+ traces:
+ receivers: [otlp]
+ exporters: [logging, otlp/elastic]
+ metrics:
+ receivers: [otlp]
+ exporters: [logging, otlp/elastic]
+ logs: [^8]
+ receivers: [otlp]
+ exporters: [logging, otlp/elastic]
+```
+[^1]: The receivers, like the
+[OTLP receiver](https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver/otlpreceiver), that forward data emitted by APM agents, or the [host metrics receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/hostmetricsreceiver).
+[^2]: We recommend using the [Batch processor](https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/batchprocessor/README.md) and the [memory limiter processor](https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/memorylimiterprocessor/README.md). For more information, see [recommended processors](https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/README.md#recommended-processors).
+[^3]: The [logging exporter](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/loggingexporter) is helpful for troubleshooting and supports various logging levels, like `debug`, `info`, `warn`, and `error`.
+[^4]: Elastic ((observability)) endpoint configuration.
+Elastic supports a ProtoBuf payload via both the OTLP protocol over gRPC transport [(OTLP/gRPC)](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md#otlpgrpc)
+and the OTLP protocol over HTTP transport [(OTLP/HTTP)](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md#otlphttp).
+To learn more about these exporters, see the OpenTelemetry Collector documentation:
+[OTLP/HTTP Exporter](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/otlphttpexporter) or
+[OTLP/gRPC exporter](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/otlpexporter).
+[^5]: Hostname and port of the Elastic endpoint. For example, `elastic-apm-server:8200`.
+[^6]: Credential for Elastic APM API key authorization (`Authorization: "ApiKey an_api_key"`).
+[^7]: Environment-specific configuration parameters can be conveniently passed in as environment variables documented [here](https://opentelemetry.io/docs/collector/configuration/#configuration-environment-variables) (e.g. `ELASTIC_APM_SERVER_ENDPOINT` and `ELASTIC_APM_API_KEY`).
+[^8]: To send OpenTelemetry logs to your project, declare a `logs` pipeline.
+
+You're now ready to export traces and metrics from your services and applications.
+
+
+When using the OpenTelemetry collector, you should always prefer sending data via the [`OTLP` exporter](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/otlphttpexporter).
+Using other methods, like the [`elasticsearch` exporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/elasticsearchexporter), will bypass all of the validation and data processing that Elastic performs.
+In addition, your data will not be viewable in your Observability project if you use the `elasticsearch` exporter.
+
+
+
+
+## Send data from an OpenTelemetry agent
+
+To export traces and metrics to Elastic, instrument your services and applications
+with the OpenTelemetry API, SDK, or both. For example, if you are a Java developer, you need to instrument your Java app with the
+[OpenTelemetry agent for Java](https://github.com/open-telemetry/opentelemetry-java-instrumentation).
+See the [OpenTelemetry Instrumentation guides](https://opentelemetry.io/docs/instrumentation/) to download the
+OpenTelemetry Agent or SDK for your language.
+
+Define environment variables to configure the OpenTelemetry agent and enable communication with Elastic APM.
+For example, if you are instrumenting a Java app, define the following environment variables:
+
+```bash
+export OTEL_RESOURCE_ATTRIBUTES=service.name=checkoutService,service.version=1.1,deployment.environment=production
+export OTEL_EXPORTER_OTLP_ENDPOINT=https://apm_server_url:8200
+export OTEL_EXPORTER_OTLP_HEADERS="Authorization=ApiKey an_apm_api_key"
+export OTEL_METRICS_EXPORTER="otlp"
+export OTEL_LOGS_EXPORTER="otlp" [^1]
+java -javaagent:/path/to/opentelemetry-javaagent-all.jar \
+ -classpath lib/*:classes/ \
+ com.mycompany.checkout.CheckoutServiceServer
+```
+[^1]: The OpenTelemetry logs intake via Elastic is currently in technical preview.
+
+
+
+
+ `OTEL_RESOURCE_ATTRIBUTES`
+ Fields that describe the service and the environment that the service runs in. See resource attributes for more information.
+
+
+ `OTEL_EXPORTER_OTLP_ENDPOINT`
+ Elastic URL. The host and port that Elastic listens for APM events on.
+
+
+ `OTEL_EXPORTER_OTLP_HEADERS`
+
+ Authorization header that includes the Elastic APM API key: `"Authorization=ApiKey an_api_key"`.
+
+
+
+ `OTEL_METRICS_EXPORTER`
+ Metrics exporter to use. See [exporter selection](https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/#exporter-selection) for more information.
+
+
+ `OTEL_LOGS_EXPORTER`
+ Logs exporter to use. See [exporter selection](https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/#exporter-selection) for more information.
+
+
+
+You are now ready to collect traces and metrics. Next, verify that metrics are being reported to your project
+and visualize them.
+
+
+
+## Proxy requests to Elastic
+
+Elastic supports both the [(OTLP/gRPC)](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md#otlpgrpc) and [(OTLP/HTTP)](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md#otlphttp) protocols on the same port as Elastic APM agent requests. For ease of setup, we recommend using OTLP/HTTP when proxying or load balancing requests to Elastic.
+
+If you use the OTLP/gRPC protocol, requests to Elastic must use either HTTP/2 over TLS or HTTP/2 Cleartext (H2C). No matter which protocol is used, OTLP/gRPC requests will have the header: `"Content-Type: application/grpc"`.
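+
+In contrast, OTLP/HTTP requests are regular HTTP requests that carry a protobuf payload, which makes them straightforward to proxy. As a rough illustration only (the endpoint, API key, and payload file below are placeholders; in practice the OpenTelemetry SDK or collector sends these requests for you), an OTLP/HTTP trace export looks like this:
+
+```bash
+# Rough sketch of an OTLP/HTTP export with placeholder values.
+# Real exports are sent by the OpenTelemetry SDK or collector, not by hand.
+curl -X POST "https://<your-apm-endpoint>/v1/traces" \
+  -H "Authorization: ApiKey <your-api-key>" \
+  -H "Content-Type: application/x-protobuf" \
+  --data-binary @trace.pb
+```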
+
+When using a layer 7 (L7) proxy like AWS ALB, requests must be proxied in a way that ensures requests to Elastic follow the rules outlined above. For example, with ALB you can create rules to select an alternative backend protocol based on the headers of requests coming into ALB. In this example, you'd select the gRPC protocol when the `"Content-Type: application/grpc"` header exists on a request.
+
+For more information on how to configure an AWS ALB to support gRPC, see this AWS blog post:
+[Application Load Balancer Support for End-to-End HTTP/2 and gRPC](https://aws.amazon.com/blogs/aws/new-application-load-balancer-support-for-end-to-end-http-2-and-grpc/).
+
+For more information on how Elastic services gRPC requests, see
+[Muxing gRPC and HTTP/1.1](https://github.com/elastic/apm-server/blob/main/dev_docs/otel.md#muxing-grpc-and-http11).
+
+
+
+## Next steps
+
+* Collect metrics
+* Add Resource attributes
+* Learn about the limitations of this integration
+
diff --git a/docs/en/serverless/apm-agents/apm-agents-opentelemetry-other-execution-environments.mdx b/docs/en/serverless/apm-agents/apm-agents-opentelemetry-other-execution-environments.mdx
new file mode 100644
index 0000000000..4f96592903
--- /dev/null
+++ b/docs/en/serverless/apm-agents/apm-agents-opentelemetry-other-execution-environments.mdx
@@ -0,0 +1,22 @@
+---
+id: serverlessObservabilityOtelLambdaSupport
+slug: /serverless/observability/opentelemetry-aws-lambda-support
+title: AWS Lambda support
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+AWS Lambda functions can be instrumented with OpenTelemetry and monitored with Elastic ((observability)).
+
+To get started, follow the official AWS Distro for OpenTelemetry Lambda [getting started documentation](https://aws-otel.github.io/docs/getting-started/lambda) and configure the OpenTelemetry Collector to output traces and metrics to your Elastic cluster.
+
+
+
+## Next steps
+
+* Collect metrics
+* Add Resource attributes
+* Learn about the limitations of this integration
+
diff --git a/docs/en/serverless/apm-agents/apm-agents-opentelemetry-resource-attributes.mdx b/docs/en/serverless/apm-agents/apm-agents-opentelemetry-resource-attributes.mdx
new file mode 100644
index 0000000000..f13f8cb8d5
--- /dev/null
+++ b/docs/en/serverless/apm-agents/apm-agents-opentelemetry-resource-attributes.mdx
@@ -0,0 +1,49 @@
+---
+id: serverlessObservabilityApmAgentsOtelResourceAttributes
+slug: /serverless/observability/apm-agents-opentelemetry-resource-attributes
+title: Resource attributes
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+A resource attribute is a key/value pair containing information about the entity producing telemetry.
+Resource attributes are mapped to Elastic Common Schema (ECS) fields like `service.*`, `cloud.*`, `process.*`, etc.
+These fields describe the service and the environment that the service runs in.
+
+The examples shown here set the ECS `service.environment` field for the resource (that is, the service) that is producing trace events.
+Note that Elastic maps the OpenTelemetry `deployment.environment` field to
+the ECS `service.environment` field on ingestion.
+
+**OpenTelemetry agent**
+
+Use the `OTEL_RESOURCE_ATTRIBUTES` environment variable to pass resource attributes at process invocation.
+
+```bash
+export OTEL_RESOURCE_ATTRIBUTES=deployment.environment=production
+```
+
+**OpenTelemetry collector**
+
+Use the [resource processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourceprocessor) to set or apply changes to resource attributes.
+
+```yaml
+...
+processors:
+ resource:
+ attributes:
+ - key: deployment.environment
+ action: insert
+ value: production
+...
+```
+
+
+
+Need to add event attributes instead?
+Use attributes—not to be confused with resource attributes—to add data to span, log, or metric events.
+Attributes can be added as a part of the OpenTelemetry instrumentation process or with the [attributes processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/attributesprocessor).
+
+
+
diff --git a/docs/en/serverless/apm-agents/apm-agents-opentelemetry.mdx b/docs/en/serverless/apm-agents/apm-agents-opentelemetry.mdx
new file mode 100644
index 0000000000..81a44b85f0
--- /dev/null
+++ b/docs/en/serverless/apm-agents/apm-agents-opentelemetry.mdx
@@ -0,0 +1,57 @@
+---
+id: serverlessObservabilityApmAgentsOtel
+slug: /serverless/observability/apm-agents-opentelemetry
+title: OpenTelemetry
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'overview' ]
+---
+
+
+
+import DiagramsApmOtelArchitecture from '../transclusion/apm/guide/diagrams/apm-otel-architecture.mdx'
+
+[OpenTelemetry](https://opentelemetry.io/docs/concepts/what-is-opentelemetry/)
+is a set of APIs, SDKs, tooling, and integrations that enable the capture and management of
+telemetry data from your services and applications. For more information about the
+OpenTelemetry project, see the [spec](https://github.com/open-telemetry/opentelemetry-specification/blob/master/README.md).
+
+## OpenTelemetry and Elastic
+
+{/* TODO: Fix diagram */}
+
+
+Elastic integrates with OpenTelemetry, allowing you to reuse your existing instrumentation
+to easily send observability data to Elastic.
+There are several ways to integrate OpenTelemetry with Elastic:
+
+**OpenTelemetry API/SDK with Elastic APM agents**
+
+To unlock the full power of Elastic, use the OpenTelemetry API/SDKs with Elastic APM agents,
+currently supported by the Java, Python, .NET, and Node.js agents.
+These Elastic APM agents translate OpenTelemetry API calls to Elastic APM API calls.
+This allows you to reuse your existing instrumentation to create Elastic APM transactions and spans—avoiding vendor lock-in and having to redo manual instrumentation.
+
+Get started →
+
+**OpenTelemetry agent**
+
+Elastic natively supports the OpenTelemetry protocol (OTLP).
+This means trace data and metrics collected from your applications and infrastructure by an
+OpenTelemetry agent can be sent directly to Elastic.
+
+Get started →
+
+**OpenTelemetry collector**
+
+Elastic natively supports the OpenTelemetry protocol (OTLP).
+This means trace data and metrics collected from your applications and infrastructure by an
+OpenTelemetry collector can be sent directly to Elastic.
+
+Get started →
+
+**Lambda collector exporter**
+
+AWS Lambda functions can be instrumented with OpenTelemetry and monitored with Elastic ((observability)).
+
+Get started →
+
diff --git a/docs/en/serverless/apm/apm-compress-spans.mdx b/docs/en/serverless/apm/apm-compress-spans.mdx
new file mode 100644
index 0000000000..5dd3f7d484
--- /dev/null
+++ b/docs/en/serverless/apm/apm-compress-spans.mdx
@@ -0,0 +1,72 @@
+---
+id: serverlessObservabilityApmCompressSpans
+slug: /serverless/observability/apm-compress-spans
+title: Compress spans
+description: Compress similar or identical spans to reduce storage overhead, processing power needed, and clutter in the Applications UI.
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+In some cases, APM agents may collect large amounts of very similar or identical spans in a transaction.
+For example, this can happen if spans are captured inside a loop or in unoptimized SQL queries that use multiple
+queries instead of joins to fetch related data.
+
+In such cases, the upper limit of spans per transaction (by default, 500 spans) can be reached quickly, causing the agent to stop capturing potentially more relevant spans for a given transaction.
+
+Capturing similar or identical spans often isn't helpful, especially if they are of very short duration.
+They can also clutter the UI, and cause processing and storage overhead.
+
+To address this problem, APM agents can compress similar spans into a single span.
+The compressed span retains most of the original span information, including the overall duration and number of spans it represents.
+
+Regardless of the compression strategy, a span is eligible for compression if:
+
+- It has not propagated its trace context.
+- It is an _exit_ span (such as database query spans).
+- Its outcome is not `"failure"`.
+
+## Compression strategies
+
+The ((apm-agent)) selects between two strategies to decide if adjacent spans can be compressed.
+In both strategies, only one previous span needs to be kept in memory.
+This ensures that the agent doesn't require large amounts of memory to enable span compression.
+
+### Same-Kind strategy
+
+The agent uses the same-kind strategy if two adjacent spans have the same:
+
+ * span type
+ * span subtype
+ * `destination.service.resource` (e.g. database name)
+
+### Exact-Match strategy
+
+The agent uses the exact-match strategy if two adjacent spans have the same:
+
+ * span name
+ * span type
+ * span subtype
+ * `destination.service.resource` (e.g. database name)
+
+## Settings
+
+You can specify the maximum span duration in the agent's configuration settings.
+Spans with a duration longer than the specified value will not be compressed.
+
+For the "Same-Kind" strategy, the default maximum span duration is 0 milliseconds, which means that
+the "Same-Kind" strategy is disabled by default.
+For the "Exact-Match" strategy, the default limit is 50 milliseconds.
+
+### Agent support
+
+Support for span compression is available in the following agents and can be configured
+using the options listed below:
+
+| Agent | Same-kind config | Exact-match config |
+|---|---|---|
+| **Go agent** | [`ELASTIC_APM_SPAN_COMPRESSION_SAME_KIND_MAX_DURATION`](((apm-go-ref-v))/configuration.html#config-span-compression-same-kind-duration) | [`ELASTIC_APM_SPAN_COMPRESSION_EXACT_MATCH_MAX_DURATION`](((apm-go-ref-v))/configuration.html#config-span-compression-exact-match-duration) |
+| **Java agent** | [`span_compression_same_kind_max_duration`](((apm-java-ref-v))/config-huge-traces.html#config-span-compression-same-kind-max-duration) | [`span_compression_exact_match_max_duration`](((apm-java-ref-v))/config-huge-traces.html#config-span-compression-exact-match-max-duration) |
+| **.NET agent** | [`SpanCompressionSameKindMaxDuration`](((apm-dotnet-ref-v))/config-core.html#config-span-compression-same-kind-max-duration) | [`SpanCompressionExactMatchMaxDuration`](((apm-dotnet-ref-v))/config-core.html#config-span-compression-exact-match-max-duration) |
+| **Node.js agent** | [`spanCompressionSameKindMaxDuration`](((apm-node-ref-v))/configuration.html#span-compression-same-kind-max-duration) | [`spanCompressionExactMatchMaxDuration`](((apm-node-ref-v))/configuration.html#span-compression-exact-match-max-duration) |
+| **Python agent** | [`span_compression_same_kind_max_duration`](((apm-py-ref-v))/configuration.html#config-span-compression-same-kind-max-duration) | [`span_compression_exact_match_max_duration`](((apm-py-ref-v))/configuration.html#config-span-compression-exact-match-max_duration) |
diff --git a/docs/en/serverless/apm/apm-create-custom-links.mdx b/docs/en/serverless/apm/apm-create-custom-links.mdx
new file mode 100644
index 0000000000..437d1f9bfb
--- /dev/null
+++ b/docs/en/serverless/apm/apm-create-custom-links.mdx
@@ -0,0 +1,205 @@
+---
+id: serverlessObservabilityApmCreateCustomLinks
+slug: /serverless/observability/apm-create-custom-links
+title: Create custom links
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+import Roles from '../partials/roles.mdx'
+
+
+
+Elastic's custom link feature allows you to easily create up to 500 dynamic links
+based on your specific APM data.
+Custom links can be filtered to only appear for relevant services,
+environments, transaction types, or transaction names.
+
+Ready to dive in? Jump straight to the examples.
+
+## Create a link
+
+Each custom link consists of a label, URL, and optional filter.
+The easiest way to create a custom link is from within the actions dropdown in the transaction detail page.
+This method will automatically apply filters, scoping the link to that specific service,
+environment, transaction type, and transaction name.
+
+Alternatively, you can create a custom link by navigating to any page within **Applications** and selecting **Settings** → **Custom Links** → **Create custom link**.
+
+### Label
+
+The name of your custom link.
+The actions context menu displays this text, so keep it as short as possible.
+
+
+Custom links are displayed alphabetically in the actions menu.
+
+
+### URL
+
+The URL your link points to.
+URLs support dynamic field name variables, encapsulated in double curly brackets: `{{field.name}}`.
+These variables will be replaced with transaction metadata when the link is clicked.
+
+Because everyone's data is different,
+you'll need to examine your traces to see what metadata is available for use.
+To do this, select a trace and click **Metadata** in the **Trace Sample** table.
+
+![Example metadata](images/custom-links/example-metadata.png)
+
+### Filters
+
+Filter each link to only appear for specific services or transactions.
+You can filter on the following fields:
+
+* `service.name`
+* `service.env`
+* `transaction.type`
+* `transaction.name`
+
+Multiple values are allowed when comma-separated.
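+For example, setting the `service.name` filter to `client,python-backend` (placeholder service names) shows the link only for those two services.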
+
+## Custom link examples
+
+Not sure where to start with custom links?
+Take a look at the examples below and customize them to your liking!
+
+### Email
+
+Email the owner of a service.
+
+{/* TODO: If we change these to Docsmobile tables they might look better */}
+
+| | |
+|---|---|
+| Label | `Email engineer` |
+| Link | `mailto:@.com` |
+| Filters | `service.name:` |
+
+**Example**
+
+This link opens an email addressed to the team or owner of `python-backend`.
+It will only appear on services with the name `python-backend`.
+
+| | |
+|---|---|
+| Label | `Email python-backend engineers` |
+| Link | `mailto:python_team@elastic.co` |
+| Filters | `service.name:python-backend` |
+
+### GitHub issue
+
+Open a GitHub issue with prepopulated metadata from the selected trace sample.
+
+| | |
+|---|---|
+| Label | `Open an issue in ` |
+| Link | `https://github.com///issues/new?title=&body=` |
+| Filters | `service.name:client` |
+
+**Example**
+
+This link opens a new GitHub issue in the apm-agent-rum repository.
+It populates the issue body with relevant metadata from the currently active trace.
+Clicking this link results in the following issue being created:
+
+![Example github issue](images/custom-links/create-github-issue.png)
+
+| | |
+|---|---|
+| Label | `Open an issue in apm-rum-js` |
+| Link | `https://github.com/elastic/apm-agent-rum-js/issues/new?title=Investigate+APM+trace&body=Investigate+the+following+APM+trace%3A%0D%0A%0D%0Aservice.name%3A+{{service.name}}%0D%0Atransaction.id%3A+{{transaction.id}}%0D%0Acontainer.id%3A+{{container.id}}%0D%0Aurl.full%3A+{{url.full}}` |
+| Filters | `service.name:client` |
+
+See the [GitHub automation documentation](https://help.github.com/en/github/managing-your-work-on-github/about-automation-for-issues-and-pull-requests-with-query-parameters) for a full list of supported query parameters.
+
+
+
+### Jira task
+
+Create a Jira task with prepopulated metadata from the selected trace sample.
+
+| | |
+|---|---|
+| Label | `Open an issue in Jira` |
+| Link | `https:///secure/CreateIssueDetails!init.jspa?` |
+
+**Example**
+
+This link creates a new task on the Engineering board in Jira.
+It populates the issue body with relevant metadata from the currently active trace.
+Clicking this link results in the following task being created in Jira:
+
+![Example jira issue](images/custom-links/create-jira-issue.png)
+
+| | |
+|---|---|
+| Label | `Open a task in Jira` |
+| Link | `https://test-site-33.atlassian.net/secure/CreateIssueDetails!init.jspa?pid=10000&issuetype=10001&summary=Created+via+APM&description=Investigate+the+following+APM+trace%3A%0D%0A%0D%0Aservice.name%3A+{{service.name}}%0D%0Atransaction.id%3A+{{transaction.id}}%0D%0Acontainer.id%3A+{{container.id}}%0D%0Aurl.full%3A+{{url.full}}` |
+
+See the [Jira application administration knowledge base](https://confluence.atlassian.com/jirakb/how-to-create-issues-using-direct-html-links-in-jira-server-159474.html)
+for a full list of supported query parameters.
+
+### Dashboards
+
+Link to a custom dashboard.
+
+| | |
+|---|---|
+| Label | `Open transaction in custom visualization` |
+| Link | `https://kibana-instance/app/kibana#/dashboard?_g=query:(language:kuery,query:'transaction.id:{{transaction.id}}'...` |
+
+**Example**
+
+This link opens the current `transaction.id` in a custom dashboard.
+There are no filters set.
+
+| | |
+|---|---|
+| Label | `Open transaction in Python drilldown viz` |
+| URL | `https://kibana-instance/app/kibana#/dashboard?_g=(filters:!(),refreshInterval:(pause:!t,value:0),time:(from:now-24h,to:now))&_a=(description:'',filters:!(),fullScreenMode:!f,options:(hidePanelTitles:!f,useMargins:!t),panels:!((embeddableConfig:(),gridData:(h:15,i:cb79c1c0-1af8-472c-aaf7-d158a76946fb,w:24,x:0,y:0),id:c8c74b20-6a30-11ea-92ab-b5d3feff11df,panelIndex:cb79c1c0-1af8-472c-aaf7-d158a76946fb,type:visualization,version:'7.7')),query:(language:kuery,query:'transaction.id:{{transaction.id}}'),timeRestore:!f,title:'',viewMode:edit)` |
+
+### Slack channel
+
+Open a specified Slack channel.
+
+| | |
+|---|---|
+| Label | `Open SLACK_CHANNEL` |
+| Link | `https://COMPANY_SLACK.slack.com/archives/SLACK_CHANNEL` |
+| Filters | `service.name` : `SERVICE_NAME` |
+
+**Example**
+
+This link opens a company Slack channel, #apm-user-support.
+It only appears when `transaction.name` is `GET user/login`.
+
+| | |
+|---|---|
+| Label | `Open #apm-user-support` |
+| Link | `https://COMPANY_SLACK.slack.com/archives/efk52kt23k` |
+| Filters | `transaction.name:GET user/login` |
+
+### Website
+
+Open an internal or external website.
+
+| | |
+|---|---|
+| Label | `Open ` |
+| Link | `https://.slack.com/archives/` |
+| Filters | `service.name:` |
+
+**Example**
+
+This link opens more data on a specific `user.email`.
+It only appears on front-end transactions.
+
+| | |
+|---|---|
+| Label | `View user internally` |
+| Link | `https://internal-site.company.com/user/{{user.email}}` |
+| Filters | `service.name:client` |
+
diff --git a/docs/en/serverless/apm/apm-data-types.mdx b/docs/en/serverless/apm/apm-data-types.mdx
new file mode 100644
index 0000000000..47a031ce2c
--- /dev/null
+++ b/docs/en/serverless/apm/apm-data-types.mdx
@@ -0,0 +1,22 @@
+---
+id: serverlessObservabilityApmDataTypes
+slug: /serverless/observability/apm-data-types
+title: APM data types
+description: Learn about the various APM data types.
+tags: [ 'serverless', 'observability', 'overview' ]
+---
+
+
+
+Elastic APM agents capture different types of information from within their instrumented applications.
+These are known as events, and can be spans, transactions, errors, or metrics:
+
+* **Spans** contain information about the execution of a specific code path.
+They measure from the start to the end of an activity, and they can have a parent/child
+relationship with other spans.
+* **Transactions** are a special kind of _span_ that have additional attributes associated with them.
+They describe an event captured by an Elastic ((apm-agent)) instrumenting a service.
+You can think of transactions as the highest level of work you’re measuring within a service.
+* **Errors** contain at least information about the original `exception` that occurred or about
+a `log` created when the exception occurred. For simplicity, errors are represented by a unique ID.
+* **Metrics** measure the state of a system by gathering information on a regular interval.
diff --git a/docs/en/serverless/apm/apm-distributed-tracing.mdx b/docs/en/serverless/apm/apm-distributed-tracing.mdx
new file mode 100644
index 0000000000..8e412415bd
--- /dev/null
+++ b/docs/en/serverless/apm/apm-distributed-tracing.mdx
@@ -0,0 +1,107 @@
+---
+id: serverlessObservabilityApmDistributedTracing
+slug: /serverless/observability/apm-distributed-tracing
+title: Distributed tracing
+description: Understand how a single request that travels through multiple services impacts your application.
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+import TabWidgetsDistributedTraceSendWidget from '../transclusion/apm/guide/tab-widgets/distributed-trace-send-widget.mdx'
+import TabWidgetsDistributedTraceReceiveWidget from '../transclusion/apm/guide/tab-widgets/distributed-trace-receive-widget.mdx'
+
+A `trace` is a group of transactions and spans with a common root.
+Each `trace` tracks the entirety of a single request.
+When a `trace` travels through multiple services, as is common in a microservice architecture,
+it is known as a distributed trace.
+
+## Why is distributed tracing important?
+
+Distributed tracing enables you to analyze performance throughout your microservice architecture
+by tracing the entirety of a request — from the initial web request on your front-end service
+all the way to database queries made on your back-end services.
+
+Tracking requests as they propagate through your services provides an end-to-end picture of
+where your application is spending time, where errors are occurring, and where bottlenecks are forming.
+Distributed tracing eliminates individual services' data silos and reveals what's happening outside of
+service borders.
+
+For supported technologies, distributed tracing works out-of-the-box, with no additional configuration required.
+
+## How distributed tracing works
+
+Distributed tracing works by injecting a custom `traceparent` HTTP header into outgoing requests.
+This header includes information, like `trace-id`, which is used to identify the current trace,
+and `parent-id`, which is used to identify the parent of the current span on incoming requests
+or the current span on an outgoing request.
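+For example, a `traceparent` header value looks like `00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01` (an illustrative value from the W3C specification), where the dash-separated fields are the version, the `trace-id`, the `parent-id`, and the trace flags.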
+
+When a service is working on a request, it checks for the existence of this HTTP header.
+If it's missing, the service starts a new trace.
+If it exists, the service ensures the current action is added as a child of the existing trace,
+and continues to propagate the trace.
+
+### Trace propagation examples
+
+In this example, Elastic's Ruby agent communicates with Elastic's Java agent.
+Both support the `traceparent` header, and trace data is successfully propagated.
+
+![How traceparent propagation works](images/distributed-tracing/dt-trace-ex1.png)
+
+In this example, Elastic's Ruby agent communicates with OpenTelemetry's Java agent.
+Both support the `traceparent` header, and trace data is successfully propagated.
+
+![How traceparent propagation works](images/distributed-tracing/dt-trace-ex2.png)
+
+In this example, the trace meets a piece of middleware that doesn't propagate the `traceparent` header.
+The distributed trace ends and any further communication will result in a new trace.
+
+![How traceparent propagation works](images/distributed-tracing/dt-trace-ex3.png)
+
+### W3C Trace Context specification
+
+All Elastic agents now support the official W3C Trace Context specification and `traceparent` header.
+See the table below for the minimum required agent version:
+
+| Agent name | Agent Version |
+|---|---|
+| **Go Agent** | ≥`1.6` |
+| **Java Agent** | ≥`1.14` |
+| **.NET Agent** | ≥`1.3` |
+| **Node.js Agent** | ≥`3.4` |
+| **PHP Agent** | ≥`1.0` |
+| **Python Agent** | ≥`5.4` |
+| **Ruby Agent** | ≥`3.5` |
+
+
+Older Elastic agents use a unique `elastic-apm-traceparent` header.
+For backward-compatibility purposes, new versions of Elastic agents still support this header.
+
+
+## Visualize distributed tracing
+
+APM's timeline visualization provides a visual deep-dive into each of your application's traces:
+
+![Example view of the distributed tracing in Elastic APM](images/spans/apm-distributed-tracing.png)
+
+## Manual distributed tracing
+
+Elastic agents automatically propagate distributed tracing context for supported technologies.
+If your service communicates over a different, unsupported protocol,
+you can manually propagate distributed tracing context from a sending service to a receiving service
+with each agent's API.
+
+### Add the `traceparent` header to outgoing requests
+
+Sending services must add the `traceparent` header to outgoing requests.
+
+
+
+
+
+### Parse the `traceparent` header on incoming requests
+
+Receiving services must parse the incoming `traceparent` header,
+and start a new transaction or span as a child of the received context.
+
+
diff --git a/docs/en/serverless/apm/apm-filter-your-data.mdx b/docs/en/serverless/apm/apm-filter-your-data.mdx
new file mode 100644
index 0000000000..b8b147c08f
--- /dev/null
+++ b/docs/en/serverless/apm/apm-filter-your-data.mdx
@@ -0,0 +1,47 @@
+---
+id: serverlessObservabilityApmFilterYourData
+slug: /serverless/observability/apm-filter-your-data
+title: Filter your data
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+Global filters are ways you can filter your APM data based on a specific
+time range or environment. When viewing a specific service, the filter persists
+as you move between tabs.
+
+![Global filters view](images/filters/global-filters.png)
+
+
+
+If you prefer to use advanced queries on your data to filter on specific pieces
+of information, see Query your data.
+
+
+
+## Global time range
+
+The global time range filter restricts APM data to a specific time period.
+
+## Service environment filter
+
+The environment selector is a global filter for `service.environment`.
+It allows you to view only relevant data and is especially useful for separating development from production environments.
+By default, all environments are displayed. If there are no environment options, you'll see "not defined".
+
+Service environments are defined when configuring your APM agents.
+It's vital to be consistent when naming environments in your APM agents.
+To learn how to configure service environments, see the specific APM agent documentation:
+
+* **Go:** [`ELASTIC_APM_ENVIRONMENT`](((apm-go-ref))/configuration.html#config-environment)
+* **Java:** [`environment`](((apm-java-ref))/config-core.html#config-environment)
+* **.NET:** [`Environment`](((apm-dotnet-ref))/config-core.html#config-environment)
+* **Node.js:** [`environment`](((apm-node-ref))/configuration.html#environment)
+* **PHP:** [`environment`](((apm-php-ref))/configuration-reference.html#config-environment)
+* **Python:** [`environment`](((apm-py-ref))/configuration.html#config-environment)
+* **Ruby:** [`environment`](((apm-ruby-ref))/configuration.html#config-environment)
+{/* * **iOS agent:** _Not yet supported_ */}
+{/* * **Real User Monitoring:** [`environment`](((apm-rum-ref))/configuration.html#environment) */}
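+
+With most Elastic APM agents, you can also set the environment through an environment variable at process start (a minimal sketch; `production` is an assumed value):
+
+```bash
+# Assumed environment name; most Elastic APM agents read ELASTIC_APM_ENVIRONMENT.
+export ELASTIC_APM_ENVIRONMENT=production
+```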
+
diff --git a/docs/en/serverless/apm/apm-find-transaction-latency-and-failure-correlations.mdx b/docs/en/serverless/apm/apm-find-transaction-latency-and-failure-correlations.mdx
new file mode 100644
index 0000000000..36ecdad669
--- /dev/null
+++ b/docs/en/serverless/apm/apm-find-transaction-latency-and-failure-correlations.mdx
@@ -0,0 +1,99 @@
+---
+id: serverlessObservabilityApmFindTransactionLatencyAndFailureCorrelations
+slug: /serverless/observability/apm-find-transaction-latency-and-failure-correlations
+title: Find transaction latency and failure correlations
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+Correlations surface attributes of your data that are potentially correlated
+with high-latency or erroneous transactions. For example, if you are a site
+reliability engineer who is responsible for keeping production systems up and
+running, you want to understand what is causing slow transactions. Identifying
+attributes that are responsible for higher latency transactions can potentially
+point you toward the root cause. You may find a correlation with a particular
+piece of hardware, like a host or pod. Or, perhaps a set of users, based on IP
+address or region, is facing increased latency due to local data center issues.
+
+To find correlations:
+
+1. In your ((observability)) project, go to **Applications** → **Services**.
+1. Select a service.
+1. Select the **Transactions** tab.
+1. Select a transaction group in the **Transactions** table.
+
+
+Active queries _are_ applied to correlations.
+
+
+## Find high transaction latency correlations
+
+The correlations on the **Latency correlations** tab help you discover which
+attributes are contributing to increased transaction latency.
+
+![APM latency correlations](images/transactions/correlations-hover.png)
+
+The progress bar indicates the status of the asynchronous analysis, which
+performs statistical searches across a large number of attributes. For large
+time ranges and services with high transaction throughput, this might take some
+time. To improve performance, reduce the time range.
+
+The latency distribution chart visualizes the overall latency of the
+transactions in the transaction group. If there are attributes that have a
+statistically significant correlation with slow response times, they are listed
+in a table below the chart. The table is sorted by correlation coefficients that
+range from 0 to 1. Attributes with higher correlation values are more likely to
+contribute to high latency transactions. By default, the attribute with the
+highest correlation value is added to the chart. To see the latency distribution
+for other attributes, select their row in the table.
+
+If a correlated attribute seems noteworthy, use the **Filter** quick links:
+
+* `+` creates a new query in the Applications UI for filtering transactions containing
+ the selected value.
+
+* `-` creates a new query in the Applications UI to filter out transactions containing
+ the selected value.
+
+You can also click the icon beside the field name to view and filter its most
+popular values.
+
+In this example screenshot, there are transactions that are skewed to the right
+with slower response times than the overall latency distribution. If you select
+the `+` filter in the appropriate row of the table, it creates a new query in
+the Applications UI for transactions with this attribute. With the "noise" now
+filtered out, you can begin viewing sample traces to continue your investigation.
+
+
+
+## Find failed transaction correlations
+
+The correlations on the **Failed transaction correlations** tab help you discover
+which attributes are most influential in distinguishing between transaction
+failures and successes. In this context, the success or failure of a transaction
+is determined by its [event.outcome](((ecs-ref))/ecs-event.html#field-event-outcome)
+value. For example, APM agents set the `event.outcome` to `failure` when an HTTP
+transaction returns a `5xx` status code.
+
+The chart highlights the failed transactions in the overall latency distribution
+for the transaction group. If there are attributes that have a statistically
+significant correlation with failed transactions, they are listed in a table.
+The table is sorted by scores, which are mapped to high, medium, or low impact
+levels. Attributes with high impact levels are more likely to contribute to
+failed transactions. By default, the attribute with the highest score is added
+to the chart. To see a different attribute in the chart, select its row in the
+table.
+
+For example, in the screenshot below, there are attributes such as a specific
+node and pod name that have medium impact on the failed transactions.
+
+![Failed transaction correlations](images/correlations/correlations-failed-transactions.png)
+
+Select the `+` filter to create a new query in the Applications UI for transactions
+with one or more of these attributes. If you are unfamiliar with a field, click
+the icon beside its name to view its most popular values and optionally filter
+on those values too. Each time that you add another attribute, it is filtering
+out more and more noise and bringing you closer to a diagnosis.
+
diff --git a/docs/en/serverless/apm/apm-get-started.mdx b/docs/en/serverless/apm/apm-get-started.mdx
new file mode 100644
index 0000000000..3bc8b79533
--- /dev/null
+++ b/docs/en/serverless/apm/apm-get-started.mdx
@@ -0,0 +1,138 @@
+---
+id: serverlessObservabilityApmGetStarted
+slug: /serverless/observability/apm-get-started
+title: Get started with traces and APM
+description: Learn how to collect Application Performance Monitoring (APM) data and visualize it in real time.
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+import Roles from '../partials/roles.mdx'
+
+
+
+
+
+import Go from '../transclusion/apm/guide/install-agents/go.mdx'
+import Java from '../transclusion/apm/guide/install-agents/java.mdx'
+import Net from '../transclusion/apm/guide/install-agents/net.mdx'
+import Node from '../transclusion/apm/guide/install-agents/node.mdx'
+import Php from '../transclusion/apm/guide/install-agents/php.mdx'
+import Python from '../transclusion/apm/guide/install-agents/python.mdx'
+import Ruby from '../transclusion/apm/guide/install-agents/ruby.mdx'
+import OpenTelemetry from '../transclusion/apm/guide/open-telemetry/otel-get-started.mdx'
+
+In this guide you'll learn how to collect and send Application Performance Monitoring (APM) data
+to Elastic, then explore and visualize the data in real time.
+
+
+
+## Step 1: Add data
+
+You'll use APM agents to send APM data from your application to Elastic. Elastic offers APM agents
+written in several languages and supports OpenTelemetry. Which agent you'll use depends on the language used in your service.
+
+To send APM data to Elastic, you must install an APM agent and configure it to send data to
+your project:
+
+1. Create a new ((observability)) project, or open an existing one.
+1. To install and configure one or more APM agents, do one of the following:
+ * In your Observability project, go to **Add data** → **Monitor my application performance** → **Elastic APM** and follow the prompts.
+ * Use the following instructions:
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ While there are many configuration options, all APM agents require:
+
+
+
+ **Service name**
+
+ The APM integration maps an instrumented service's name — defined in
+ each ((apm-agent))'s configuration — to the index where its data is stored.
+ Service names are case-insensitive and must be unique.
+
+ For example, you cannot have a service named `Foo` and another named `foo`.
+ Special characters will be removed from service names and replaced with underscores (`_`).
+
+
+
+ **Server URL**
+
+ The host and port that the managed intake service listens for events on.
+
+ To find the URL for your project:
+
+ 1. Go to the [Cloud console](https://cloud.elastic.co/).
+ 1. Next to your project, select **Manage**.
+ 1. Next to _Endpoints_, select **View**.
+ 1. Copy the _APM endpoint_.
+
+
+
+ **API key**
+
+ Authentication method for communication between ((apm-agent)) and the managed intake service.
+
+ You can create and delete API keys in Applications Settings:
+ 1. Go to any page in the _Applications_ section of the main menu.
+ 1. Click **Settings** in the top bar.
+ 1. Go to the **Agent keys** tab.
+
+
+
+ **Environment**
+
+ The name of the environment this service is deployed in, for example "production" or "staging".
+
+ Environments allow you to easily filter data on a global level in the UI.
+ It's important to be consistent when naming environments across agents.
+
+
+
+
+1. If you're using the step-by-step instructions in the UI, after you've installed and configured an agent,
+you can click **Check Agent Status** to verify that the agent is sending data.
+
+To learn more about APM agents, including how to fine-tune how agents send traces to Elastic,
+refer to .
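+
+For reference, most APM agents can read these settings from environment variables. The following is a minimal sketch rather than a copy-paste configuration; the exact option names are listed in each ((apm-agent))'s documentation, and the endpoint and key values are placeholders:
+
+```bash
+# Hypothetical example: configure an APM agent through environment variables.
+export ELASTIC_APM_SERVICE_NAME="my-service"            # unique, case-insensitive service name
+export ELASTIC_APM_SERVER_URL="https://<apm-endpoint>"  # the APM endpoint copied from the Cloud console
+export ELASTIC_APM_API_KEY="<api-key>"                  # created under Settings -> Agent keys
+export ELASTIC_APM_ENVIRONMENT="production"             # deployment environment of this service
+```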
+
+
+
+## Step 2: View your data
+
+After one or more APM agents are installed and successfully sending data, you can view
+application performance monitoring data in the UI.
+
+In the _Applications_ section of the main menu, select **Services**.
+This will show a high-level overview of the health and general performance of all your services.
+
+Learn more about visualizing APM data in .
+
+{/* TO DO: ADD SCREENSHOT */}
+
+
+Not seeing any data? Find helpful tips in Troubleshooting.
+
+
+## Next steps
+
+Now that data is streaming into your project, take your investigation to a
+deeper level. Learn how to use Elastic's built-in visualizations for APM data,
+alert on APM data,
+or fine-tune how agents send traces to Elastic.
diff --git a/docs/en/serverless/apm/apm-integrate-with-machine-learning.mdx b/docs/en/serverless/apm/apm-integrate-with-machine-learning.mdx
new file mode 100644
index 0000000000..e4f6a362e3
--- /dev/null
+++ b/docs/en/serverless/apm/apm-integrate-with-machine-learning.mdx
@@ -0,0 +1,70 @@
+---
+id: serverlessObservabilityApmIntegrateWithMachineLearning
+slug: /serverless/observability/apm-integrate-with-machine-learning
+title: Integrate with machine learning
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+The Machine learning integration initiates a new job predefined to calculate anomaly scores on APM transaction durations.
+With this integration, you can quickly pinpoint anomalous transactions and see the health of
+any upstream and downstream services.
+
+Machine learning jobs are created per environment and are based on a service's average response time.
+Because jobs are created at the environment level,
+you can add new services to your existing environments without the need for additional machine learning jobs.
+
+Results from machine learning jobs are shown in multiple places throughout the Applications UI:
+
+* The **Services overview** provides a quick-glance view of the general health of all of your services.
+
+ {/* TODO: Take this screenshot (no data in oblt now)
+ ![Example view of anomaly scores on response times in the Applications UI](images/machine-learning-integration/apm-service-quick-health.png) */}
+
+* The transaction duration chart will show the expected bounds and add an annotation when the anomaly score is 75 or above.
+
+ {/* TODO: Take this screenshot (no data in oblt now)
+ ![Example view of anomaly scores on response times in the Applications UI](images/machine-learning-integration/apm-apm-ml-integration.png) */}
+
+* Service Maps will display a color-coded anomaly indicator based on the detected anomaly score.
+
+ ![Example view of anomaly scores on service maps in the Applications UI](images/service-maps/service-map-anomaly.png)
+
+## Enable anomaly detection
+
+To enable machine learning anomaly detection:
+
+1. In your ((observability)) project, go to any **Applications** page.
+
+1. Click **Anomaly detection**.
+
+1. Click **Create Job**.
+
+1. Machine learning jobs are created at the environment level.
+ Select all of the service environments that you want to enable anomaly detection in.
+ Anomalies will surface for all services and transaction types within the selected environments.
+
+1. Click **Create Jobs**.
+
+That's it! After a few minutes, the job will begin calculating results;
+it might take additional time for results to appear on your service maps.
+To manage existing jobs, click **Manage jobs** (or go to **AIOps** → **Anomaly detection**).
+
+## Anomaly detection warning
+
+To make machine learning as easy as possible to set up,
+Elastic will warn you when you filter to an environment that doesn't have a machine learning job.
+
+{/* TODO: Take this screenshot (no data in oblt now)
+![Example view of anomaly alert in the Applications UI](images/machine-learning-integration/apm-apm-anomaly-alert.png) */}
+
+## Unknown service health
+
+After enabling anomaly detection, service health may display as "Unknown". Here are some reasons why this can occur:
+
+1. No machine learning job exists. See Enable anomaly detection to enable anomaly detection and create a machine learning job.
+1. There is no machine learning data for the job. If you just created the machine learning job, you'll need to wait a few minutes for data to be available. Alternatively, if the service or its environment is new, you'll need to wait for more trace data.
+1. No "request" or "page-load" transaction type exists for this service; service health is only available for these transaction types.
+
diff --git a/docs/en/serverless/apm/apm-keep-data-secure.mdx b/docs/en/serverless/apm/apm-keep-data-secure.mdx
new file mode 100644
index 0000000000..c7cb0ff6fc
--- /dev/null
+++ b/docs/en/serverless/apm/apm-keep-data-secure.mdx
@@ -0,0 +1,80 @@
+---
+id: serverlessObservabilityApmKeepDataSecure
+slug: /serverless/observability/apm-keep-data-secure
+title: Keep APM data secure
+description: Make sure APM data is sent to Elastic securely and sensitive data is protected.
+tags: [ 'serverless', 'observability', 'overview' ]
+---
+
+
+
+import Roles from '../partials/roles.mdx'
+
+
+
+{/* TODO: Find out whether Editor or Admin is required to create and manage API keys. */}
+
+When setting up Elastic APM, it's essential to ensure that the data collected by
+APM agents is sent to Elastic securely and that sensitive data is protected.
+
+## Secure communication with APM agents
+
+Communication between APM agents and the managed intake service is both encrypted and authenticated.
+Requests without a valid API key will be denied.
+
+### Create a new API key
+
+To create a new API key:
+
+1. In your ((observability)) project, go to any **Applications** page.
+1. Click **Settings**.
+1. Select the **APM agent keys** tab.
+1. Click **Create APM agent key**.
+1. Name the key and assign privileges to it.
+1. Click **Create APM agent key**.
+1. Copy the key now. You will not be able to see it again. API keys do not expire.
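+
+After copying the key, supply it to your APM agents. As a minimal sketch (the exact option name is listed in each ((apm-agent))'s documentation, and the value is a placeholder), many agents accept the key as an environment variable:
+
+```bash
+# Hypothetical example: pass a newly created API key to an APM agent.
+export ELASTIC_APM_API_KEY="<paste-the-key-you-copied>"
+```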
+
+### Delete an API key
+
+To delete an API key:
+
+1. From any of the **Applications** pages, click **Settings**.
+1. Select the **APM agent keys** tab.
+1. Search for the API key you want to delete.
+1. Click the trash can icon to delete the selected API key.
+
+### View existing API keys
+
+To view all API keys for your project:
+
+1. Expand **Project settings**.
+1. Select **Management**.
+1. Select **API keys**.
+
+## Data security
+
+When setting up Elastic APM, it's essential to review all captured data carefully to ensure it doesn't contain sensitive information like passwords, credit card numbers, or health data.
+
+Some APM agents offer a way to manipulate or drop APM events _before_ they leave your services.
+Refer to the relevant agent's documentation for more information and examples:
+
+### Java
+
+**`include_process_args`**: Remove process arguments from transactions. This option is disabled by default. Read more in the [Java agent configuration docs](((apm-java-ref-v))/config-reporter.html#config-include-process-args).
+
+### .NET
+
+**Filter API**: Drop APM events _before_ they are sent to Elastic. Read more in the [.NET agent Filter API docs](((apm-dotnet-ref-v))/public-api.html#filter-api).
+
+### Node.js
+
+* **`addFilter()`**: Drop APM events _before_ they are sent to Elastic. Read more in the [Node.js agent API docs](((apm-node-ref-v))/agent-api.html#apm-add-filter).
+* **`captureExceptions`**: Remove errors raised by the server-side process by disabling the `captureExceptions` configuration option. Read more in [the Node.js agent configuration docs](((apm-node-ref-v))/configuration.html#capture-exceptions).
+
+### Python
+
+**Custom processors**: Drop APM events _before_ they are sent to Elastic. Read more in the [Python agent Custom processors docs](((apm-py-ref-v))/sanitizing-data.html).
+
+### Ruby
+
+**`add_filter()`**: Drop APM events _before_ they are sent to Elastic. Read more in the [Ruby agent API docs](((apm-ruby-ref-v))/api.html#api-agent-add-filter).
diff --git a/docs/en/serverless/apm/apm-kibana-settings.mdx b/docs/en/serverless/apm/apm-kibana-settings.mdx
new file mode 100644
index 0000000000..004f68dedd
--- /dev/null
+++ b/docs/en/serverless/apm/apm-kibana-settings.mdx
@@ -0,0 +1,92 @@
+---
+id: serverlessObservabilityApmKibanaSettings
+slug: /serverless/observability/apm-kibana-settings
+title: Settings
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+import Roles from '../partials/roles.mdx'
+
+
+
+You can adjust Application settings to fine-tune your experience in the Applications UI.
+
+## General settings
+
+To change APM settings, select **Settings** from any **Applications** page.
+The following settings are available.
+
+`observability:apmAgentExplorerView`
+
+: Enables the Agent explorer view.
+
+`observability:apmAWSLambdaPriceFactor`
+
+: Set the price per GB-second for your AWS Lambda functions.
+
+`observability:apmAWSLambdaRequestCostPerMillion`
+
+: Set the AWS Lambda cost per million requests.
+
+`observability:apmEnableContinuousRollups`
+
+: When continuous rollups are enabled, the UI will select metrics with the appropriate resolution.
+On larger time ranges, lower resolution metrics will be used, which will improve loading times.
+
+`observability:apmEnableServiceMetrics`
+
+: Enables the usage of service transaction metrics, which are low cardinality metrics that can be used by certain views like the service inventory for faster loading times.
+
+`observability:apmLabsButton`
+
+: Enable or disable the APM Labs button — a quick way to enable and disable technical preview features in APM.
+
+{/* [[observability-apm-critical-path]]`observability:apmEnableCriticalPath`
+When enabled, displays the critical path of a trace. */}
+
+{/* [[observability-enable-progressive-loading]]`observability:apmProgressiveLoading`
+preview:[] When enabled, uses progressive loading of some APM views.
+Data may be requested with a lower sampling rate first, with lower accuracy but faster response times,
+while the unsampled data loads in the background. */}
+
+`observability:apmServiceGroupMaxNumberOfServices`
+
+: Limit the number of services in a given service group.
+
+{/* [[observability-apm-optimized-sort]]`observability:apmServiceInventoryOptimizedSorting`
+preview:[] Sorts services without anomaly detection rules on the APM Service inventory page by service name. */}
+
+`observability:apmDefaultServiceEnvironment`
+
+: Set the default environment for APM. When left empty, data from all environments will be displayed by default.
+
+`observability:apmEnableProfilingIntegration`
+
+: Enable the Universal Profiling integration in APM.
+
+{/* [[observability-enable-aws-lambda-metrics]]`observability:enableAwsLambdaMetrics`
+preview:[] Display Amazon Lambda metrics in the service metrics tab. */}
+
+`observability:enableComparisonByDefault`
+
+: Enable the comparison feature by default.
+
+`observability:enableInspectEsQueries`
+
+: When enabled, allows you to inspect Elasticsearch queries in API responses.
+
+{/* [[observability-apm-trace-explorer-tab]]`observability:apmTraceExplorerTab`
+preview:[] Enable the APM Trace Explorer feature, that allows you to search and inspect traces with KQL or EQL. */}
+
+## APM Labs
+
+**APM Labs** allows you to easily try out new features that are in technical preview.
+
+To enable APM labs, go to **Applications** → **Settings** → **General settings** and toggle **Enable labs button in APM**.
+Select **Save changes** and refresh the page.
+
+After enabling **APM Labs**, select **Labs** in the toolbar to see the technical preview features available to try out.
+
diff --git a/docs/en/serverless/apm/apm-observe-lambda-functions.mdx b/docs/en/serverless/apm/apm-observe-lambda-functions.mdx
new file mode 100644
index 0000000000..2dd456f9c0
--- /dev/null
+++ b/docs/en/serverless/apm/apm-observe-lambda-functions.mdx
@@ -0,0 +1,49 @@
+---
+id: serverlessObservabilityApmObserveLambdaFunctions
+slug: /serverless/observability/apm-observe-lambda-functions
+title: Observe Lambda functions
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+Elastic APM provides performance and error monitoring for AWS Lambda functions.
+See how your Lambda functions relate to and depend on other services, and
+get insight into function execution and runtime behavior, like Lambda duration, cold start rate, cold start duration, compute usage, memory usage, and more.
+
+To set up Lambda monitoring, refer to .
+
+![lambda overview](images/apm-lambda/lambda-overview.png)
+
+## Cold starts
+
+A cold start occurs when a Lambda function has not been used for a certain period of time. A Lambda worker receives a request to run the function and prepares an execution environment.
+
+Cold starts are an unavoidable byproduct of the serverless world, but visibility into how they impact your services can help you make better decisions about factors like how much memory to allocate to a function, whether to enable provisioned concurrency, or if it's time to consider removing a large dependency.
+
+### Cold start rate
+
+The cold start rate (that is, the proportion of requests that experience a cold start) is displayed per service and per transaction.
+
+Cold start is also displayed in the trace waterfall, where you can drill down into individual traces and see trace metadata like AWS request ID, trigger type, and trigger request ID.
+
+{/* TODO: RETAKE
+![lambda cold start trace](images/apm-lambda/lambda-cold-start-trace.png) */}
+
+### Latency distribution correlation
+
+The latency correlations feature can be used to visualize the impact of Lambda cold starts on latency—just select the `faas.coldstart` field.
+
+{/* TODO: RETAKE
+![lambda correlations example](images/apm-lambda/lambda-correlations.png) */}
+
+## AWS Lambda function grouping
+
+The default APM agent configuration results in one APM service per AWS Lambda function,
+where the Lambda function name is the service name.
+
+In some use cases, it makes more sense to logically group multiple Lambda functions under a single
+APM service. You can achieve this by setting the `ELASTIC_APM_SERVICE_NAME` environment variable
+on related Lambda functions to the same value.
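+
+For example, a sketch using the AWS CLI (function and service names are placeholders; note that `--environment` replaces a function's existing environment variables, so include any others you rely on):
+
+```bash
+# Hypothetical example: group two Lambda functions under one APM service name.
+aws lambda update-function-configuration --function-name checkout-create-order \
+  --environment "Variables={ELASTIC_APM_SERVICE_NAME=checkout}"
+aws lambda update-function-configuration --function-name checkout-charge-card \
+  --environment "Variables={ELASTIC_APM_SERVICE_NAME=checkout}"
+```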
+
diff --git a/docs/en/serverless/apm/apm-query-your-data.mdx b/docs/en/serverless/apm/apm-query-your-data.mdx
new file mode 100644
index 0000000000..aa29e21546
--- /dev/null
+++ b/docs/en/serverless/apm/apm-query-your-data.mdx
@@ -0,0 +1,75 @@
+---
+id: serverlessObservabilityApmQueryYourData
+slug: /serverless/observability/apm-query-your-data
+title: Query your data
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+Querying your APM data is an essential tool that can make finding bottlenecks in your code even more straightforward.
+
+Using the query bar, a powerful data query feature, you can run advanced queries against your data
+to filter on the specific pieces of information you’re interested in.
+APM queries entered into the query bar are added as parameters to the URL, so it’s easy to share a specific query or view with others.
+
+The query bar comes with a handy autocomplete that helps you find fields and even suggests values they contain.
+You can select the query bar and press the down arrow on your keyboard to begin scanning suggestions.
+
+When you type, you can begin to see some of the fields available for filtering:
+
+![Example of the Kibana Query bar in the Applications UI](images/advanced-queries/apm-query-bar.png)
+
+
+
+To learn more about the ((kib)) query language capabilities, see the [Kibana Query Language Enhancements](((kibana-ref))/kuery-query.html) documentation.
+
+
+
+## APM queries
+
+APM queries can be handy for removing noise from your data in the Services, Transactions,
+Errors, Metrics, and Traces views.
+
+For example, in the **Services** view, you can quickly view a list of all the instrumented services running in your production
+environment: `service.environment : production`. Or filter the list by including the APM agent's name and the host it’s running on:
+`service.environment : "production" and agent.name : "java" and host.name : "prod-server1"`.
+
+On the **Traces** view, you might want to view failed transaction results from any of your running containers:
+`transaction.result :"FAILURE" and container.id : *`.
+
+On the **Transactions** view, you may want to list only transactions slower than a specified time threshold: `transaction.duration.us > 2000000`.
+Or filter the list by including the service version and the Kubernetes pod it's running on:
+`transaction.duration.us > 2000000 and service.version : "7.12.0" and kubernetes.pod.name : "pod-5468b47f57-pqk2m"`.
+
+## Querying in Discover
+
+Alternatively, you can query your APM documents in [*Discover*](((kibana-ref))/discover.html).
+Querying documents in **Discover** works the same way as queries in the Applications UI,
+and **Discover** supports all of the example APM queries shown on this page.
+
+### Discover queries
+
+One example where you may want to make use of **Discover**
+is to view _all_ transactions for an endpoint instead of just a sample.
+
+Use the Applications UI to find a transaction name and time bucket that you're interested in learning more about.
+Then, switch to **Discover** and run a search:
+
+```shell
+processor.event: "transaction" AND transaction.name: "APIRestController#customers" and transaction.duration.us > 13000 and transaction.duration.us < 14000
+```
+
+In this example, we're interested in viewing all of the `APIRestController#customers` transactions
+that took between 13 and 14 milliseconds. Here's what Discover returns:
+
+![View all transactions in bucket](images/advanced-queries/advanced-discover.png)
+
+You can now explore the data until you find a specific transaction that you're interested in.
+Copy that transaction's `transaction.id` and paste it into the Applications UI to view the data in the context of the full trace:
+
+![View specific transaction in the Applications UI](images/advanced-queries/specific-transaction-search.png)
+
+![View specific transaction in the Applications UI](images/advanced-queries/specific-transaction.png)
+
diff --git a/docs/en/serverless/apm/apm-reduce-your-data-usage.mdx b/docs/en/serverless/apm/apm-reduce-your-data-usage.mdx
new file mode 100644
index 0000000000..b35ba25589
--- /dev/null
+++ b/docs/en/serverless/apm/apm-reduce-your-data-usage.mdx
@@ -0,0 +1,21 @@
+---
+id: serverlessObservabilityApmReduceYourDataUsage
+slug: /serverless/observability/apm-reduce-your-data-usage
+title: Reduce your data usage
+description: Implement strategies for reducing your data usage without compromising the ability to analyze APM data.
+tags: [ 'serverless', 'observability', 'overview' ]
+---
+
+
+
+The richness and volume of APM data provides unique insights into your applications, but it can
+also mean higher costs and more noise when analyzing data. There are a couple of strategies you can
+use to reduce your data usage while continuing to get the full value of APM data. Read more about
+these strategies:
+
+* : Reduce data storage, costs, and
+noise by ingesting only a percentage of all traces that you can extrapolate from in your analysis.
+* : Compress similar or identical spans to
+reduce storage overhead, processing power needed, and clutter in the Applications UI.
+* : Reduce the stacktrace information
+collected by your APM agents.
diff --git a/docs/en/serverless/apm/apm-reference.mdx b/docs/en/serverless/apm/apm-reference.mdx
new file mode 100644
index 0000000000..67a217fb8a
--- /dev/null
+++ b/docs/en/serverless/apm/apm-reference.mdx
@@ -0,0 +1,18 @@
+---
+id: serverlessObservabilityApmReference
+slug: /serverless/observability/apm-reference
+title: Reference
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+The following reference documentation is available:
+
+*
+* [API reference](https://docs.elastic.co/api-reference/observability/post_api-apm-agent-keys)
+
+In addition to the public API above, the APM managed intake service offers an
+.
+This API is exclusively for APM agent developers. The vast majority of users should have no reason to interact with this API.
diff --git a/docs/en/serverless/apm/apm-send-traces-to-elastic.mdx b/docs/en/serverless/apm/apm-send-traces-to-elastic.mdx
new file mode 100644
index 0000000000..ec5c75d42f
--- /dev/null
+++ b/docs/en/serverless/apm/apm-send-traces-to-elastic.mdx
@@ -0,0 +1,26 @@
+---
+id: serverlessObservabilityApmSendTracesToElastic
+slug: /serverless/observability/apm-send-data-to-elastic
+title: Send APM data to Elastic
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'overview' ]
+---
+
+
+
+import Roles from '../partials/roles.mdx'
+
+
+
+
+ Want to get started quickly? See Get started with traces and APM.
+
+
+Send APM data to Elastic with:
+
+* **:** Elastic APM agents are lightweight libraries you install in your applications and services. They automatically instrument supported technologies, and offer APIs for custom code instrumentation.
+* **:** OpenTelemetry is a set of APIs, SDKs, tooling, and integrations that enable the capture and management of telemetry data from your services and applications.
+
+Elastic also supports instrumentation of .
+
+{/* To do: We should put a diagram here showing how high-level arch */}
diff --git a/docs/en/serverless/apm/apm-server-api.mdx b/docs/en/serverless/apm/apm-server-api.mdx
new file mode 100644
index 0000000000..16334822d6
--- /dev/null
+++ b/docs/en/serverless/apm/apm-server-api.mdx
@@ -0,0 +1,59 @@
+---
+id: serverlessObservabilityApmServerApi
+slug: /serverless/observability/apm-server-api
+title: Managed intake service event API
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+import Api from './apm-server-api/api.mdx'
+import ApiError from './apm-server-api/api-error.mdx'
+import ApiEvents from './apm-server-api/api-events.mdx'
+import ApiInfo from './apm-server-api/api-info.mdx'
+import ApiMetadata from './apm-server-api/api-metadata.mdx'
+import ApiMetricset from './apm-server-api/api-metricset.mdx'
+import ApiSpan from './apm-server-api/api-span.mdx'
+import ApiTransaction from './apm-server-api/api-transaction.mdx'
+import OtelAPI from './apm-server-api/otel-api.mdx'
+
+
+
+ This API is exclusively for APM agent developers. The vast majority of users should have no reason to interact with this API.
+
+
+
+
+## Server information API
+
+
+
+## Events intake API
+
+
+
+### Metadata
+
+
+
+### Transactions
+
+
+
+### Spans
+
+
+
+### Errors
+
+
+
+### Metrics
+
+
+
+## OpenTelemetry API
+
+
+
diff --git a/docs/en/serverless/apm/apm-server-api/api-error.mdx b/docs/en/serverless/apm/apm-server-api/api-error.mdx
new file mode 100644
index 0000000000..a0a97086d8
--- /dev/null
+++ b/docs/en/serverless/apm/apm-server-api/api-error.mdx
@@ -0,0 +1,18 @@
+
+
+import V2Error from '../../transclusion/apm/guide/spec/v2/error.mdx'
+
+
+
+An error or a logged error message captured by an agent occurring in a monitored service.
+
+
+
+#### Error Schema
+
+The managed intake service uses a JSON Schema to validate requests. The specification for errors is defined on
+[GitHub](https://github.com/elastic/apm-server/blob/main/docs/spec/v2/error.json) and included below.
+
+
+
+
diff --git a/docs/en/serverless/apm/apm-server-api/api-events.mdx b/docs/en/serverless/apm/apm-server-api/api-events.mdx
new file mode 100644
index 0000000000..4e35108231
--- /dev/null
+++ b/docs/en/serverless/apm/apm-server-api/api-events.mdx
@@ -0,0 +1,138 @@
+
+
+
+
+
+Most users do not need to interact directly with the events intake API.
+
+
+The events intake API is what we call the internal protocol that APM agents use to talk to the managed intake service.
+Agents communicate with the managed intake service by sending events — captured pieces of information — in an HTTP request.
+Events can be:
+
+* Transactions
+* Spans
+* Errors
+* Metrics
+
+Each event is sent as its own line in the HTTP request body.
+This is known as [newline delimited JSON (NDJSON)](http://ndjson.org).
+
+With NDJSON, agents can open an HTTP POST request and use chunked encoding to stream events to the managed intake service
+as soon as they are recorded in the agent.
+This makes it simple for agents to serialize each event to a stream of newline delimited JSON.
+The managed intake service also treats the HTTP body as a compressed stream and thus reads and handles each event independently.
+
+Refer to to learn more about the different types of events.
+
+
+
+### Endpoints
+
+The managed intake service exposes the following endpoints for Elastic APM agent data intake:
+
+| Name | Endpoint |
+|---|---|
+| APM agent event intake | `/intake/v2/events` |
+
+{/* | RUM event intake (v2) | `/intake/v2/rum/events` |
+| RUM event intake (v3) | `/intake/v3/rum/events` | */}
+
+
+
+### Request
+
+Send an `HTTP POST` request to the managed intake service `intake/v2/events` endpoint:
+
+```bash
+https://{hostname}:{port}/intake/v2/events
+```
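+
+For example, a request might be sketched as follows. The headers shown are the usual ones for NDJSON intake with API key authentication; the hostname, port, API key, and `events.ndjson` file are placeholders, and the file would contain one JSON object per line, starting with a `metadata` object:
+
+```bash
+# Hypothetical example: stream an NDJSON file of events to the intake endpoint.
+curl -X POST "https://{hostname}:{port}/intake/v2/events" \
+  -H "Authorization: ApiKey <api-key>" \
+  -H "Content-Type: application/x-ndjson" \
+  --data-binary @events.ndjson
+```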
+
+The managed intake service supports asynchronous processing of batches.
+To request asynchronous processing, set the `async` query parameter in the POST request
+to the `intake/v2/events` endpoint:
+
+```bash
+https://{hostname}:{port}/intake/v2/events?async=true
+```
+
+
+Since asynchronous processing defers some of the event processing to the
+background and takes place after the client has closed the request, some errors
+can't be communicated back to the client and are logged by the managed intake service.
+Furthermore, asynchronous processing requests will only be scheduled if the managed intake service can
+service the incoming request; requests that cannot be serviced will receive an internal
+`503` "queue is full" error.
+
+
+{/* For RUM send an `HTTP POST` request to the managed intake service `intake/v3/rum/events` endpoint instead:
+
+```bash
+http(s)://{hostname}:{port}/intake/v3/rum/events
+``` */}
+
+
+
+### Response
+
+On success, the server will respond with a 202 Accepted status code and no body.
+
+Keep in mind that events can succeed and fail independently of each other. Only if all events succeed does the server respond with a 202.
+
+
+
+### API Errors
+
+There are two types of errors that the managed intake service may return to an agent:
+
+* Event-related errors (typically validation errors)
+* Non-event-related errors
+
+The managed intake service processes events one after the other.
+If an error is encountered while processing an event,
+the error encountered as well as the document causing the error are added to an internal array.
+The managed intake service will only save 5 event-related errors.
+If it encounters more than 5 event-related errors,
+the additional errors will not be returned to the agent.
+Once all events have been processed,
+the error response is sent.
+
+Some errors, not relating to specific events,
+may terminate the request immediately.
+For example: IP rate limit reached, wrong metadata, etc.
+If at any point one of these errors is encountered,
+it is added to the internal array and immediately returned.
+
+An example error response might look something like this:
+
+```json
+{
+ "errors": [
+ {
+ "message": "", [^1]
+ "document": "" [^2]
+ },{
+ "message": "",
+ "document": ""
+ },{
+ "message": "",
+ "document": ""
+ },{
+ "message": "too many requests" [^3]
+ },
+ ],
+ "accepted": 2320 [^4]
+}
+```
+[^1]: An event related error
+[^2]: The document causing the error
+[^3]: An immediately returning non-event related error
+[^4]: The number of accepted events
+
+If you're developing an agent, these errors can be useful for debugging.
+
+
+
+### Event API Schemas
+
+The managed intake service uses a collection of JSON Schemas for validating requests to the intake API.
diff --git a/docs/en/serverless/apm/apm-server-api/api-info.mdx b/docs/en/serverless/apm/apm-server-api/api-info.mdx
new file mode 100644
index 0000000000..244d44a4bb
--- /dev/null
+++ b/docs/en/serverless/apm/apm-server-api/api-info.mdx
@@ -0,0 +1,38 @@
+
+
+
+
+The managed intake service exposes an API endpoint to query general server information.
+This lightweight endpoint is useful as a server up/down health check.
+
+
+
+### Server Information endpoint
+
+Send an `HTTP GET` request to the server information endpoint:
+
+```bash
+https://{hostname}:{port}/
+```
+
+This endpoint always returns an HTTP 200.
+
+Requests to this endpoint must be authenticated.
+
+
+
+#### Example
+
+Example managed intake service information request:
+
+```sh
+curl -X GET http://127.0.0.1:8200/ \
+ -H "Authorization: ApiKey api_key"
+
+{
+ "build_date": "2021-12-18T19:59:06Z",
+ "build_sha": "24fe620eeff5a19e2133c940c7e5ce1ceddb1445",
+ "publish_ready": true,
+ "version": "((version))"
+}
+```
diff --git a/docs/en/serverless/apm/apm-server-api/api-metadata.mdx b/docs/en/serverless/apm/apm-server-api/api-metadata.mdx
new file mode 100644
index 0000000000..1faa07dc72
--- /dev/null
+++ b/docs/en/serverless/apm/apm-server-api/api-metadata.mdx
@@ -0,0 +1,60 @@
+
+
+import V2Metadata from '../../transclusion/apm/guide/spec/v2/metadata.mdx'
+
+
+
+Every new connection to the managed intake service starts with a `metadata` stanza.
+This provides general metadata concerning the other objects in the stream.
+
+Rather than send this metadata information from the agent multiple times,
+the managed intake service hangs on to this information and applies it to other objects in the stream as necessary.
+
+
+Metadata is stored under `context` when viewing documents in ((es)).
+
+
+#### Metadata Schema
+
+The managed intake service uses JSON Schema to validate requests. The specification for metadata is defined on
+[GitHub](https://github.com/elastic/apm-server/blob/main/docs/spec/v2/metadata.json) and included below.
+
+
+
+
+
+#### Kubernetes data
+
+APM agents automatically read Kubernetes data and send it to the managed intake service.
+In most instances, agents are able to read this data from inside the container.
+If this is not the case, or if you wish to override this data, you can set environment variables for the agents to read.
+These environment variables are set via the Kubernetes [Downward API](https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/#use-pod-fields-as-values-for-environment-variables).
+Here's how you would add the environment variables to your Kubernetes pod spec:
+
+```yaml
+ - name: KUBERNETES_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: KUBERNETES_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: KUBERNETES_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: KUBERNETES_POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+```
+
+The table below maps these environment variables to the APM metadata event field:
+
+| Environment variable | Metadata field name |
+|---|---|
+| `KUBERNETES_NODE_NAME` | system.kubernetes.node.name |
+| `KUBERNETES_POD_NAME` | system.kubernetes.pod.name |
+| `KUBERNETES_NAMESPACE` | system.kubernetes.namespace |
+| `KUBERNETES_POD_UID` | system.kubernetes.pod.uid |
diff --git a/docs/en/serverless/apm/apm-server-api/api-metricset.mdx b/docs/en/serverless/apm/apm-server-api/api-metricset.mdx
new file mode 100644
index 0000000000..ea9031f136
--- /dev/null
+++ b/docs/en/serverless/apm/apm-server-api/api-metricset.mdx
@@ -0,0 +1,18 @@
+
+
+import V2Metricset from '../../transclusion/apm/guide/spec/v2/metricset.mdx'
+
+
+
+Metrics contain application metric data captured by an ((apm-agent)).
+
+
+
+#### Metric Schema
+
+The managed intake service uses JSON Schema to validate requests. The specification for metrics is defined on
+[GitHub](https://github.com/elastic/apm-server/blob/main/docs/spec/v2/metricset.json) and included below.
+
+
+
+
diff --git a/docs/en/serverless/apm/apm-server-api/api-span.mdx b/docs/en/serverless/apm/apm-server-api/api-span.mdx
new file mode 100644
index 0000000000..eac1803c2b
--- /dev/null
+++ b/docs/en/serverless/apm/apm-server-api/api-span.mdx
@@ -0,0 +1,18 @@
+
+
+import V2Span from '../../transclusion/apm/guide/spec/v2/span.mdx'
+
+
+
+Spans are events captured by an agent occurring in a monitored service.
+
+
+
+#### Span Schema
+
+The managed intake service uses JSON Schema to validate requests. The specification for spans is defined on
+[GitHub](https://github.com/elastic/apm-server/blob/main/docs/spec/v2/span.json) and included below.
+
+
+
+
diff --git a/docs/en/serverless/apm/apm-server-api/api-transaction.mdx b/docs/en/serverless/apm/apm-server-api/api-transaction.mdx
new file mode 100644
index 0000000000..943c30623c
--- /dev/null
+++ b/docs/en/serverless/apm/apm-server-api/api-transaction.mdx
@@ -0,0 +1,18 @@
+
+
+import V2Transaction from '../../transclusion/apm/guide/spec/v2/transaction.mdx'
+
+
+
+Transactions are events corresponding to an incoming request or similar task occurring in a monitored service.
+
+
+
+#### Transaction Schema
+
+The managed intake service uses JSON Schema to validate requests. The specification for transactions is defined on
+[GitHub](https://github.com/elastic/apm-server/blob/main/docs/spec/v2/transaction.json) and included below.
+
+
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/apm/apm-server-api/api.mdx b/docs/en/serverless/apm/apm-server-api/api.mdx
new file mode 100644
index 0000000000..d3a68255b7
--- /dev/null
+++ b/docs/en/serverless/apm/apm-server-api/api.mdx
@@ -0,0 +1,9 @@
+
+
+
+
+The managed intake service exposes endpoints for:
+
+* The managed intake service information API
+* Elastic APM events intake API
+* OpenTelemetry intake API
diff --git a/docs/en/serverless/apm/apm-server-api/otel-api.mdx b/docs/en/serverless/apm/apm-server-api/otel-api.mdx
new file mode 100644
index 0000000000..6445c28858
--- /dev/null
+++ b/docs/en/serverless/apm/apm-server-api/otel-api.mdx
@@ -0,0 +1,29 @@
+
+Elastic supports receiving traces, metrics, and logs over the
+[OpenTelemetry Protocol (OTLP)](https://opentelemetry.io/docs/specs/otlp/).
+OTLP is the default transfer protocol for OpenTelemetry and is supported natively by the managed intake service.
+
+The managed intake service supports two OTLP communication protocols on the same port:
+
+* OTLP/HTTP (protobuf)
+* OTLP/gRPC
+
+### OTLP/gRPC paths
+
+| Name | Endpoint |
+|---|---|
+|OTLP metrics intake |`/opentelemetry.proto.collector.metrics.v1.MetricsService/Export`
+|OTLP trace intake |`/opentelemetry.proto.collector.trace.v1.TraceService/Export`
+|OTLP logs intake |`/opentelemetry.proto.collector.logs.v1.LogsService/Export`
+
+### OTLP/HTTP paths
+
+| Name | Endpoint |
+|---|---|
+|OTLP metrics intake |`/v1/metrics`
+|OTLP trace intake |`/v1/traces`
+|OTLP logs intake |`/v1/logs`
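+
+As a rough sketch (the endpoint and key are placeholders, and exporter configuration varies by OpenTelemetry SDK or collector), an OTLP exporter can be pointed at the managed intake service with the standard OTLP environment variables:
+
+```bash
+# Hypothetical example: export OTLP data with API key authentication.
+export OTEL_EXPORTER_OTLP_ENDPOINT="https://<your-apm-endpoint>"
+export OTEL_EXPORTER_OTLP_HEADERS="Authorization=ApiKey <api-key>"
+```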
+
+
+ See our to learn how to send data to the managed intake service from an OpenTelemetry agent or OpenTelemetry collector.
+
\ No newline at end of file
diff --git a/docs/en/serverless/apm/apm-stacktrace-collection.mdx b/docs/en/serverless/apm/apm-stacktrace-collection.mdx
new file mode 100644
index 0000000000..7b02d1f1df
--- /dev/null
+++ b/docs/en/serverless/apm/apm-stacktrace-collection.mdx
@@ -0,0 +1,15 @@
+---
+id: serverlessObservabilityApmReduceStackTrace
+slug: /serverless/observability/apm-stacktrace-collection
+title: Stacktrace collection
+description: Reduce data storage and costs by reducing stacktrace collection
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+Elastic APM agents collect `stacktrace` information under certain circumstances. This can be very helpful in identifying issues in your code, but it also comes with an overhead at collection time and increases your storage usage.
+
+Stack trace collection settings are managed in each APM agent. You can enable and disable this feature, or set specific configuration limits, like the maximum number of stacktrace frames to collect, or the minimum duration of a stacktrace to collect.
+
+See the relevant [((apm-agent)) documentation](((apm-agents-ref))/index.html) to learn how to customize stacktrace collection.
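+
+For example, as a minimal sketch (option and environment variable names vary by agent; check the agent's documentation for the exact setting), you might cap the number of frames collected per stack trace:
+
+```bash
+# Hypothetical example: limit collection to 25 frames per stack trace.
+export ELASTIC_APM_STACK_TRACE_LIMIT=25
+```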
diff --git a/docs/en/serverless/apm/apm-track-deployments-with-annotations.mdx b/docs/en/serverless/apm/apm-track-deployments-with-annotations.mdx
new file mode 100644
index 0000000000..53dfa70cc6
--- /dev/null
+++ b/docs/en/serverless/apm/apm-track-deployments-with-annotations.mdx
@@ -0,0 +1,56 @@
+---
+id: serverlessObservabilityApmTrackDeploymentsWithAnnotations
+slug: /serverless/observability/apm-track-deployments-with-annotations
+title: Track deployments with annotations
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+import Roles from '../partials/roles.mdx'
+
+
+
+![Example view of transactions annotation in the Applications UI](images/annotations/apm-transaction-annotation.png)
+
+For enhanced visibility into your deployments, we offer deployment annotations on all transaction charts.
+This feature enables you to easily determine if your deployment has increased response times for an end-user,
+or if the memory/CPU footprint of your application has changed.
+Being able to quickly identify bad deployments enables you to roll back and fix issues without causing costly outages.
+
+By default, automatic deployment annotations are enabled.
+This means APM will create an annotation on your data when the `service.version` of your application changes.
+
+Alternatively, you can explicitly create deployment annotations with our annotation API.
+The API can integrate into your CI/CD pipeline,
+so that each time you deploy, a POST request is sent to the annotation API endpoint.
+
+{/* TODO: This is commented out for now, but it might be nice to add a working example? */}
+{/* ```shell
+curl -X POST \
+ http://localhost:5601/api/apm/services/${SERVICE_NAME}/annotation \ [^1]
+-H 'Content-Type: application/json' \
+-H 'kbn-xsrf: true' \
+-H 'Authorization: Basic ${API_KEY}' \ [^2]
+-d '{
+ "@timestamp": "${DEPLOY_TIME}", [^3]
+ "service": {
+ "version": "${SERVICE_VERSION}" [^4]
+ },
+ "message": "${MESSAGE}" [^5]
+ }'
+```
+[^1]: The `service.name` of your application
+[^2]: An APM API key with sufficient privileges
+[^3]: The time of the deployment
+[^4]: The `service.version` to be displayed in the annotation
+[^5]: A custom message to be displayed in the annotation */}
+
+{/* Todo: Link to API docs */}
+See the Annotation API reference for more information.
+
+
+If custom annotations have been created for the selected time period, any derived annotations, i.e., those created automatically when `service.version` changes, will not be shown.
+
+
diff --git a/docs/en/serverless/apm/apm-transaction-sampling.mdx b/docs/en/serverless/apm/apm-transaction-sampling.mdx
new file mode 100644
index 0000000000..0700bbfc7e
--- /dev/null
+++ b/docs/en/serverless/apm/apm-transaction-sampling.mdx
@@ -0,0 +1,92 @@
+---
+id: serverlessObservabilityApmTransactionSampling
+slug: /serverless/observability/apm-transaction-sampling
+title: Transaction sampling
+description: Reduce data storage, costs, and noise by ingesting only a percentage of all traces that you can extrapolate from in your analysis.
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+import ConfigureHeadBasedSampling from './apm-transaction-sampling/configure-head-based-sampling.mdx'
+
+Distributed tracing can
+generate a substantial amount of data. More data can mean higher costs and more noise.
+Sampling aims to lower the amount of data ingested and the effort required to analyze that data —
+all while still making it easy to find anomalous patterns in your applications, detect outages, track errors,
+and lower mean time to recovery (MTTR).
+
+## Head-based sampling
+
+In head-based sampling, the sampling decision for each trace is made when the trace is initiated.
+Each trace has a defined and equal probability of being sampled.
+
+For example, a sampling value of `.2` indicates a transaction sample rate of `20%`.
+This means that only `20%` of traces will send and retain all of their associated information.
+The remaining traces will drop contextual information to reduce the transfer and storage size of the trace.
+
+Head-based sampling is quick and easy to set up.
+Its downside is that it's entirely random — interesting
+data might be discarded purely due to chance.
+
+### Distributed tracing with head-based sampling
+
+In a _distributed_ trace, the sampling decision is still made when the trace is initiated.
+Each subsequent service respects the initial service's sampling decision, regardless of its configured sample rate;
+the result is a sampling percentage that matches the initiating service.
+
+In this example, `Service A` initiates four transactions and has a sample rate of `.5` (`50%`).
+The sample rates of `Service B` and `Service C` are ignored.
+
+![Distributed tracing and head based sampling example one](../images/apm-dt-sampling-example-1.png)
+
+In this example, `Service A` initiates four transactions and has a sample rate of `1` (`100%`).
+Again, the sample rates of `Service B` and `Service C` are ignored.
+
+![Distributed tracing and head based sampling example two](../images/apm-dt-sampling-example-2.png)
+
+### OpenTelemetry with head-based sampling
+
+Head-based sampling is implemented directly in the APM agents and SDKs.
+The sample rate must be propagated between services and the managed intake service in order to produce accurate metrics.
+
+OpenTelemetry offers multiple samplers. However, most samplers do not propagate the sample rate.
+This results in inaccurate span-based metrics, like APM throughput, latency, and error metrics.
+
+For accurate span-based metrics when using head-based sampling with OpenTelemetry, you must use
+a [consistent probability sampler](https://opentelemetry.io/docs/specs/otel/trace/tracestate-probability-sampling/).
+These samplers propagate the sample rate between services and the managed intake service, resulting in accurate metrics.
+
+
+ OpenTelemetry does not offer consistent probability samplers in all languages. Refer to the documentation of your favorite OpenTelemetry agent or SDK for more information.
+
+
+## Sampled data and visualizations
+
+A sampled trace retains all data associated with it.
+A non-sampled trace drops all span and transaction data.
+Regardless of the sampling decision, all traces retain error data.
+
+Some visualizations in the Applications UI, like latency, are powered by aggregated transaction and span metrics.
+Metrics are based on sampled traces and weighted by the inverse sampling rate.
+For example, if you sample at 5%, each trace is counted as 20.
+As a result, as the variance of latency increases, or the sampling rate decreases, your level of error will increase.
+
+## Sample rates
+
+What's the best sampling rate? Unfortunately, there isn't one.
+Sampling is dependent on your data, the throughput of your application, data retention policies, and other factors.
+Sampling rates from `.1%` to `100%` are all considered normal.
+You'll likely decide on a unique sample rate for different scenarios.
+Here are some examples:
+
+* Services with considerably more traffic than others might be safe to sample at lower rates
+* Routes that are more important than others might be sampled at higher rates
+* A production service environment might warrant a higher sampling rate than a development environment
+* Failed trace outcomes might be more interesting than successful traces — thus requiring a higher sample rate
+
+Regardless of the above, cost-conscious customers are likely to be fine with a lower sample rate.
+
+## Configure head-based sampling
+
+
diff --git a/docs/en/serverless/apm/apm-transaction-sampling/configure-head-based-sampling.mdx b/docs/en/serverless/apm/apm-transaction-sampling/configure-head-based-sampling.mdx
new file mode 100644
index 0000000000..313a8f73b7
--- /dev/null
+++ b/docs/en/serverless/apm/apm-transaction-sampling/configure-head-based-sampling.mdx
@@ -0,0 +1,26 @@
+
+
+{/* There are three ways to adjust the head-based sampling rate of your APM agents:
+
+### Dynamic configuration
+
+The transaction sample rate can be changed dynamically (no redeployment necessary) on a per-service and per-environment
+basis with [((apm-agent)) Configuration](((kibana-ref))/agent-configuration.html) in ((kib)). */}
+
+{/* ### ((kib)) API configuration
+
+((apm-agent)) configuration exposes an API that can be used to programmatically change
+your agents' sampling rate.
+An example is provided in the [Agent configuration API reference](((kibana-ref))/agent-config-api.html). */}
+
+Each APM agent provides a configuration value used to set the transaction sample rate.
+Refer to the relevant agent's documentation for more details:
+
+* Go: [`ELASTIC_APM_TRANSACTION_SAMPLE_RATE`](((apm-go-ref-v))/configuration.html#config-transaction-sample-rate)
+* Java: [`transaction_sample_rate`](((apm-java-ref-v))/config-core.html#config-transaction-sample-rate)
+* .NET: [`TransactionSampleRate`](((apm-dotnet-ref-v))/config-core.html#config-transaction-sample-rate)
+* Node.js: [`transactionSampleRate`](((apm-node-ref-v))/configuration.html#transaction-sample-rate)
+* PHP: [`transaction_sample_rate`](((apm-php-ref))/configuration-reference.html#config-transaction-sample-rate)
+* Python: [`transaction_sample_rate`](((apm-py-ref-v))/configuration.html#config-transaction-sample-rate)
+* Ruby: [`transaction_sample_rate`](((apm-ruby-ref-v))/configuration.html#config-transaction-sample-rate)
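+
+For example, as a minimal sketch using the environment variable form shown for the Go agent above (other agents accept an equivalent setting; see the links above for the exact name), you could sample 20% of traces:
+
+```bash
+# Hypothetical example: keep roughly 20% of traces for this service.
+export ELASTIC_APM_TRANSACTION_SAMPLE_RATE=0.2
+```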
+
diff --git a/docs/en/serverless/apm/apm-troubleshooting.mdx b/docs/en/serverless/apm/apm-troubleshooting.mdx
new file mode 100644
index 0000000000..28f3cedeab
--- /dev/null
+++ b/docs/en/serverless/apm/apm-troubleshooting.mdx
@@ -0,0 +1,50 @@
+---
+id: serverlessObservabilityApmTroubleshooting
+slug: /serverless/observability/apm-troubleshooting
+title: Troubleshooting
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+import CommonProblems from './apm-troubleshooting/common-problems.mdx'
+import CommonResponseCodes from './apm-troubleshooting/common-response-codes.mdx'
+
+
+
+This section provides solutions to common questions and problems,
+and processing and performance guidance.
+
+## Common problems
+
+
+
+## Common response codes
+
+
+
+## Related troubleshooting resources
+
+For additional help with other APM components, see the links below.
+((agent)) and each ((apm-agent)) has its own troubleshooting guide:
+
+* [((fleet)) and ((agent)) troubleshooting](((fleet-guide))/troubleshooting-intro.html)
+* [.NET agent troubleshooting](((apm-dotnet-ref))/troubleshooting.html)
+* [Go agent troubleshooting](((apm-go-ref))/troubleshooting.html)
+* [Java agent troubleshooting](((apm-java-ref))/trouble-shooting.html)
+* [Node.js agent troubleshooting](((apm-node-ref))/troubleshooting.html)
+* [PHP agent troubleshooting](((apm-php-ref))/troubleshooting.html)
+* [Python agent troubleshooting](((apm-py-ref))/troubleshooting.html)
+* [Ruby agent troubleshooting](((apm-ruby-ref))/debugging.html)
+
+## Elastic Support
+
+We offer a support experience unlike any other.
+Our team of professionals 'speak human and code' and love making your day.
+[Learn more about subscriptions](https://www.elastic.co/subscriptions).
+
+{/* ### Discussion forum
+
+For additional questions and feature requests,
+visit our [discussion forum](https://discuss.elastic.co/c/apm). */}
diff --git a/docs/en/serverless/apm/apm-troubleshooting/common-problems.mdx b/docs/en/serverless/apm/apm-troubleshooting/common-problems.mdx
new file mode 100644
index 0000000000..1cf40cb07b
--- /dev/null
+++ b/docs/en/serverless/apm/apm-troubleshooting/common-problems.mdx
@@ -0,0 +1,69 @@
+
+
+import NoDataIndexed from '../../transclusion/apm/guide/tab-widgets/no-data-indexed/fleet-managed.mdx'
+
+This section describes common problems you might encounter.
+
+{/* * No data is indexed
+* APM Server response codes
+* Common SSL-related problems
+* I/O Timeout
+* Field limit exceeded */}
+
+### No data is indexed
+
+If no data shows up, first make sure that your APM components are properly connected.
+
+
+
+
+
+### Data is indexed but doesn't appear in the Applications UI
+
+Elastic APM relies on default index mappings, data streams, and pipelines to query and display data.
+If your APM data isn't showing up in the Applications UI, but is elsewhere in Elastic, like Discover,
+you've likely made a change that overwrote a default.
+If you've manually changed a data stream, index template, or index pipeline,
+please verify you are not interfering with the default APM setup.
+
+{/* ### I/O Timeout
+
+I/O Timeouts can occur when your timeout settings across the stack are not configured correctly,
+especially when using a load balancer.
+
+You may see an error like the one below in the ((apm-agent)) logs, and/or a similar error on the intake side:
+
+```logs
+[ElasticAPM] APM Server responded with an error:
+"read tcp 123.34.22.313:8200->123.34.22.40:41602: i/o timeout"
+```
+
+To fix this error, ensure timeouts are incrementing from the ((apm-agent)),
+through your load balancer, to the Elastic APM intake.
+
+By default, Elastic APM agent timeouts are set at 10 seconds, and the Elastic intake timeout is set at 60 seconds.
+Your load balancer should be set somewhere between these numbers.
+
+For example:
+
+```txt
+APM agent --> Load Balancer --> Elastic APM intake
+ 10s 15s 60s
+``` */}
+
+
+
+### Field limit exceeded
+
+When adding too many distinct tag keys on a transaction or span,
+you risk creating a [mapping explosion](((ref))/mapping.html#mapping-limit-settings).
+
+For example, avoid using user-specified data,
+like URL parameters, as a tag key.
+Likewise, using the current timestamp or a user ID as a tag key is not a good idea.
+However, tag **values** with a high cardinality are not a problem.
+Just try to keep the number of distinct tag keys at a minimum.
+
+The symptom of a mapping explosion is that transactions and spans are not indexed anymore after a certain time. Usually, on the next day,
+the spans and transactions will be indexed again because a new index is created each day.
+But as soon as the field limit is reached, indexing stops again.
diff --git a/docs/en/serverless/apm/apm-troubleshooting/common-response-codes.mdx b/docs/en/serverless/apm/apm-troubleshooting/common-response-codes.mdx
new file mode 100644
index 0000000000..8e1415a401
--- /dev/null
+++ b/docs/en/serverless/apm/apm-troubleshooting/common-response-codes.mdx
@@ -0,0 +1,19 @@
+
+
+
+### HTTP 400: Data decoding error / Data validation error
+
+The most likely cause for this error is using an incompatible version of an ((apm-agent)).
+See minimum supported APM agent versions to verify compatibility.
+
+
+
+### HTTP 400: Event too large
+
+APM agents communicate with the Managed intake service by sending events in an HTTP request. Each event is sent as its own line in the HTTP request body. If events are too large, you can reduce the size of the events that your APM agents send by: or .
+
+
+
+### HTTP 401: Invalid token
+
+The API key is invalid.
diff --git a/docs/en/serverless/apm/apm-ui-dependencies.mdx b/docs/en/serverless/apm/apm-ui-dependencies.mdx
new file mode 100644
index 0000000000..d1ed6409b8
--- /dev/null
+++ b/docs/en/serverless/apm/apm-ui-dependencies.mdx
@@ -0,0 +1,51 @@
+---
+id: serverlessObservabilityApmDependencies
+slug: /serverless/observability/apm-dependencies
+title: Dependencies
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+import FeatureBeta from '../partials/feature-beta.mdx'
+
+APM agents collect details about external calls made from instrumented services.
+Sometimes, these external calls resolve into a downstream service that's instrumented — in these cases,
+you can utilize distributed tracing to drill down into problematic downstream services.
+Other times, though, it's not possible to instrument a downstream dependency —
+like with a database or third-party service.
+**Dependencies** gives you a window into these uninstrumented, downstream dependencies.
+
+![Dependencies view in the Applications UI](images/dependencies/dependencies.png)
+
+Many application issues are caused by slow or unresponsive downstream dependencies.
+And because a single, slow dependency can significantly impact the end-user experience,
+it's important to be able to quickly identify these problems and determine the root cause.
+
+Select a dependency to see detailed latency, throughput, and failed transaction rate metrics.
+
+![Dependencies drilldown view in the Applications UI](images/dependencies/dependencies-drilldown.png)
+
+When viewing a dependency, consider your pattern of usage with that dependency.
+If your usage pattern _hasn't_ increased or decreased,
+but the experience has been negatively affected—either with an increase in latency or errors—there's
+likely a problem with the dependency that needs to be addressed.
+
+If your usage pattern _has_ changed, the dependency view can quickly show you whether
+that pattern change exists in all upstream services, or just a subset of your services.
+You might then start digging into traces coming from
+impacted services to determine why that pattern change has occurred.
+
+## Operations
+
+
+
+**Dependency operations** provides a granular breakdown of the operations/queries a dependency is executing.
+
+![operations view in the Applications UI](images/dependencies/operations.png)
+
+Selecting an operation displays the operation's impact and performance trends over time, via key metrics like latency, throughput, and failed transaction rate. In addition, the **Trace sample timeline** provides a visual drill-down into an end-to-end trace sample.
+
+![operations detail view in the Applications UI](images/dependencies/operations-detail.png)
+
diff --git a/docs/en/serverless/apm/apm-ui-errors.mdx b/docs/en/serverless/apm/apm-ui-errors.mdx
new file mode 100644
index 0000000000..841d3a4a55
--- /dev/null
+++ b/docs/en/serverless/apm/apm-ui-errors.mdx
@@ -0,0 +1,40 @@
+---
+id: serverlessObservabilityApmErrors
+slug: /serverless/observability/apm-errors
+title: Errors
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+_Errors_ are groups of exceptions with a similar exception or log message.
+The **Errors** overview provides a high-level view of the exceptions that APM agents catch,
+or that users manually report with APM agent APIs.
+Similar errors are grouped together, making it easy to quickly see which errors affect your services
+and to take action to rectify them.
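+
+As a minimal sketch of manual error reporting (assuming the Python ((apm-agent)); other agents expose equivalent APIs), an exception can be sent to APM explicitly:
+
+```python
+import elasticapm
+
+client = elasticapm.Client(service_name="my-service")
+
+try:
+    1 / 0
+except ZeroDivisionError:
+    # Reports the exception; it is grouped with similar errors in this view.
+    client.capture_exception()
+```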
+
+A service returning a 5xx code from a request handler, controller, etc., will not create
+an exception that an APM agent can catch, and will therefore not show up in this view.
+
+![APM Errors overview](images/errors/apm-errors-overview.png)
+
+Selecting an error group ID or error message brings you to the **Error group**.
+
+![APM Error group](images/errors/apm-error-group.png)
+
+The error group details page visualizes the number of error occurrences over time and compared to a recent time range.
+This allows you to quickly determine if the error rate is changing or remaining constant.
+You'll also see the top 5 affected transactions—enabling you to quickly narrow down which transactions are most impacted
+by the selected error.
+
+Further down, you'll see an Error sample.
+The error shown is always the most recent to occur.
+The sample includes the exception message, culprit, stack trace where the error occurred,
+and additional contextual information to help debug the issue—all of which can be copied with the click of a button.
+
+In some cases, you might also see a Transaction sample ID.
+This feature allows you to make a connection between the errors and transactions,
+by linking you to the specific transaction where the error occurred.
+This allows you to see the whole trace, including which services the request went through.
+
diff --git a/docs/en/serverless/apm/apm-ui-infrastructure.mdx b/docs/en/serverless/apm/apm-ui-infrastructure.mdx
new file mode 100644
index 0000000000..84b9516cf4
--- /dev/null
+++ b/docs/en/serverless/apm/apm-ui-infrastructure.mdx
@@ -0,0 +1,22 @@
+---
+id: serverlessObservabilityApmInfrastructure
+slug: /serverless/observability/apm-infrastructure
+title: Infrastructure
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+import FeatureBeta from '../partials/feature-beta.mdx'
+
+
+
+The **Infrastructure** tab provides information about the containers, pods, and hosts
+that the selected service is linked to.
+
+![Example view of the Infrastructure tab in the Applications UI](images/infrastructure/infra.png)
+
+IT ops and software reliability engineers (SREs) can use this tab
+to quickly find a service's underlying infrastructure resources when debugging a problem.
+Knowing what infrastructure is related to a service allows you to remediate issues by restarting, killing hanging instances, changing configuration, rolling back deployments, scaling up, scaling out, and so on.
diff --git a/docs/en/serverless/apm/apm-ui-logs.mdx b/docs/en/serverless/apm/apm-ui-logs.mdx
new file mode 100644
index 0000000000..303d6aa4e5
--- /dev/null
+++ b/docs/en/serverless/apm/apm-ui-logs.mdx
@@ -0,0 +1,22 @@
+---
+id: serverlessObservabilityApmLogs
+slug: /serverless/observability/apm-logs
+title: Logs
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+import LogOverview from '../transclusion/kibana/logs/log-overview.mdx'
+
+The **Logs** tab shows contextual logs for the selected service.
+
+
+
+![Example view of the Logs tab in the Applications UI](images/logs/logs.png)
+
+
+Logs displayed on this page are filtered on `service.name`.
+
+
diff --git a/docs/en/serverless/apm/apm-ui-metrics.mdx b/docs/en/serverless/apm/apm-ui-metrics.mdx
new file mode 100644
index 0000000000..2f70687da1
--- /dev/null
+++ b/docs/en/serverless/apm/apm-ui-metrics.mdx
@@ -0,0 +1,28 @@
+---
+id: serverlessObservabilityApmMetrics
+slug: /serverless/observability/apm-metrics
+title: Metrics
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+The **Metrics** overview provides APM agent-specific metrics,
+which lets you perform more in-depth root cause analysis investigations within the Applications UI.
+
+If you're experiencing a problem with your service, you can use this page to attempt to find the underlying cause.
+For example, you might be able to correlate a high number of errors with a long transaction duration, high CPU usage, or a memory leak.
+
+![Example view of the Metrics overview in the Applications UI](images/metrics/apm-metrics.png)
+
+If you're using the Java APM agent, you can view metrics for each JVM.
+
+![Example view of the Metrics overview for the Java Agent](images/metrics/jvm-metrics-overview.png)
+
+Breaking down metrics by JVM makes it much easier to analyze the provided metrics:
+CPU usage, memory usage, heap or non-heap memory,
+thread count, garbage collection rate, and garbage collection time spent per minute.
+
+![Example view of the Metrics overview for the Java Agent](images/metrics/jvm-metrics.png)
+
diff --git a/docs/en/serverless/apm/apm-ui-overview.mdx b/docs/en/serverless/apm/apm-ui-overview.mdx
new file mode 100644
index 0000000000..72741b4d24
--- /dev/null
+++ b/docs/en/serverless/apm/apm-ui-overview.mdx
@@ -0,0 +1,28 @@
+---
+id: serverlessObservabilityApmUiOverview
+slug: /serverless/observability/apm-ui-overview
+title: Navigate the Applications UI
+description: Learn how to navigate the Applications UI.
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+For a quick, high-level overview of the health and performance of your application,
+start with:
+
+* Services
+* Traces
+* Dependencies
+* Service Map
+
+Notice something awry? Select a service or trace and dive deeper with:
+
+* Service overview
+* Transactions
+* Trace sample timeline
+* Errors
+* Metrics
+* Infrastructure
+* Logs
+
diff --git a/docs/en/serverless/apm/apm-ui-service-map.mdx b/docs/en/serverless/apm/apm-ui-service-map.mdx
new file mode 100644
index 0000000000..598393f795
--- /dev/null
+++ b/docs/en/serverless/apm/apm-ui-service-map.mdx
@@ -0,0 +1,114 @@
+---
+id: serverlessObservabilityApmServiceMap
+slug: /serverless/observability/apm-service-map
+title: Service map
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+A service map is a real-time visual representation of the instrumented services in your application's architecture.
+It shows you how these services are connected, along with high-level metrics like average transaction duration,
+requests per minute, and errors per minute.
+If enabled, service maps also integrate with machine learning—for real-time health indicators based on anomaly detection scores.
+All of these features can help you quickly and visually assess your services' status and health.
+
+We currently surface two types of service maps:
+
+* **Global**: Shows all services instrumented with APM agents and the connections between them.
+* **Service-specific**: Highlights the connections of a selected service.
+
+## How do service maps work?
+
+Service Maps rely on distributed traces to draw connections between services.
+Because [distributed tracing](((apm-guide-ref))/apm-distributed-tracing.html) is enabled out-of-the-box for supported technologies, service maps are too.
+However, if a service isn't instrumented,
+or a `traceparent` header isn't being propagated to it,
+distributed tracing will not work, and the connection will not be drawn on the map.
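+
+For reference, the `traceparent` header is defined by the [W3C Trace Context](https://www.w3.org/TR/trace-context/) specification and carries a version, trace ID, parent span ID, and trace flags, for example:
+
+```txt
+traceparent: 00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01
+```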
+
+## Visualize your architecture
+
+From **Services**, switch to the **Service Map** tab to get started.
+By default, all instrumented services and connections are shown.
+Whether you're onboarding a new engineer, or just trying to grasp the big picture,
+drag things around, zoom in and out, and begin to visualize how your services are connected.
+
+Customize what the service map displays using either the query bar or the environment selector.
+The query bar enables you to use advanced queries to customize the service map based on your needs.
+The environment selector allows you to narrow displayed results to a specific environment.
+This can be useful if you have two or more services, in separate environments, but with the same name.
+Use the environment drop-down to only see the data you're interested in, like `dev` or `production`.
+
+If there's a specific service that interests you, select that service to highlight its connections.
+Click **Focus map** to refocus the map on the selected service and lock the connection highlighting.
+Click the **Transactions** tab to jump to the Transaction overview for the selected service.
+You can also use the tabs at the top of the page to easily jump to the **Errors** or **Metrics** overview.
+
+![Example view of service maps in the Applications UI](images/service-maps/service-maps-java.png)
+
+## Anomaly detection with machine learning
+
+You can create machine learning jobs to calculate anomaly scores on APM transaction durations within the selected service.
+When these jobs are active, service maps will display a color-coded anomaly indicator based on the detected anomaly score:
+
+
+* **Green**: Max anomaly score **≤25**. Service is healthy.
+* **Yellow**: Max anomaly score **26-74**. Anomalous activity detected. Service may be degraded.
+* **Red**: Max anomaly score **≥75**. Anomalous activity detected. Service is unhealthy.
+
+![Example view of anomaly scores on service maps in the Applications UI](images/service-maps/service-map-anomaly.png)
+
+If an anomaly has been detected, click **View anomalies** to view the anomaly detection metric viewer.
+This time series analysis will display additional details on the severity and time of the detected anomalies.
+
+To learn how to create a machine learning job, refer to .
+
+## Legend
+
+Nodes appear on the map in one of two shapes:
+
+* **Circle**: Instrumented services. Interior icons are based on the language of the APM agent used.
+* **Diamond**: Databases, external, and messaging. Interior icons represent the generic type,
+ with specific icons for known entities, like Elasticsearch.
+ Type and subtype are based on `span.type`, and `span.subtype`.
+
+## Supported APM agents
+
+Service Maps are supported for the following APM agent versions:
+
+| Agent | Version |
+|---|---|
+| Go agent | ≥ v1.7.0 |
+| Java agent | ≥ v1.13.0 |
+| .NET agent | ≥ v1.3.0 |
+| Node.js agent | ≥ v3.6.0 |
+| PHP agent | ≥ v1.2.0 |
+| Python agent | ≥ v5.5.0 |
+| Ruby agent | ≥ v3.6.0 |
+
diff --git a/docs/en/serverless/apm/apm-ui-service-overview.mdx b/docs/en/serverless/apm/apm-ui-service-overview.mdx
new file mode 100644
index 0000000000..dba11ec117
--- /dev/null
+++ b/docs/en/serverless/apm/apm-ui-service-overview.mdx
@@ -0,0 +1,135 @@
+---
+id: serverlessObservabilityApmServiceOverview
+slug: /serverless/observability/apm-service-overview
+title: Service Overview
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+import ThroughputTransactions from '../transclusion/kibana/apm/service-overview/throughput-transactions.mdx'
+import Ftr from '../transclusion/kibana/apm/service-overview/ftr.mdx'
+import Dependencies from '../transclusion/kibana/apm/service-overview/dependencies.mdx'
+
+Selecting a {/* non-mobile */} **service** brings you to the **Service overview**.
+The **Service overview** contains a wide variety of charts and tables that provide
+high-level visibility into how a service is performing across your infrastructure:
+
+* Service details like service version, runtime version, framework, and APM agent name and version
+* Container and orchestration information
+* Cloud provider, machine type, service name, region, and availability zone
+* Serverless function names and event trigger type
+* Latency, throughput, and errors over time
+* Service dependencies
+
+## Time series and expected bounds comparison
+
+For insight into the health of your services, you can compare how a service
+performs relative to a previous time frame or to the expected bounds from the
+corresponding ((anomaly-job)). For example, has latency been slowly increasing
+over time? Did the service experience a sudden spike? Is the throughput similar
+to what the ((ml)) job expects? Enabling a comparison can provide the answer.
+
+![Time series and expected bounds comparison](images/services/time-series-expected-bounds-comparison.png)
+
+Select the **Comparison** box to apply a time-based or expected bounds comparison.
+The time-based comparison options are based on the selected time filter range:
+
+| Time filter | Time comparison options |
+|---|---|
+| ≤ 24 hours | One day or one week |
+| \> 24 hours and ≤ 7 days | One week |
+| \> 7 days | An identical amount of time immediately before the selected time range |
+
+The expected bounds comparison is powered by machine learning and requires anomaly detection to be enabled.
+
+## Latency
+
+Response times for the service. You can filter the **Latency** chart to display the average,
+95th, or 99th percentile latency times for the service.
+
+![Service latency](images/services/latency.png)
+
+## Throughput and transactions
+
+
+
+## Failed transaction rate and errors
+
+
+
+The **Errors** table provides a high-level view of each error message, when it first and last occurred,
+along with the total number of occurrences. This makes it very easy to quickly see which errors affect
+your services and take actions to rectify them. To do so, click **View errors**.
+
+![failed transaction rate and errors](images/services/error-rate.png)
+
+## Span types average duration and dependencies
+
+The **Time spent by span type** chart visualizes each span type's average duration and helps you determine
+which spans could be slowing down transactions. The "app" label displayed under the
+chart indicates that something was happening within the application. This could signal that the APM
+agent does not have auto-instrumentation for whatever was happening during that time or that the time was spent in the
+application code and not in database or external requests.
+
+
+
+## Cold start rate
+
+The cold start rate chart is specific to serverless services, and displays the
+percentage of requests that trigger a cold start of a serverless function.
+A cold start occurs when a serverless function has not been used for a certain period of time.
+Analyzing the cold start rate can be useful for deciding how much memory to allocate to a function,
+or when to remove a large dependency.
+
+The cold start rate chart is currently supported for AWS Lambda
+functions and Azure functions.
+
+## Instances
+
+The **Instances** table displays a list of all the available service instances within the selected time range.
+Depending on how the service runs, the instance could be a host or a container. The table displays latency, throughput,
+failed transaction rate, CPU usage, and memory usage for each instance. By default, instances are sorted by _Throughput_.
+
+![All instances](images/services/all-instances.png)
+
+## Service metadata
+
+To view metadata relating to the service agent, and if relevant, the container and cloud provider,
+click on each icon located at the top of the page beside the service name.
+
+![Service metadata](images/services/metadata-icons.png)
+
+**Service information**
+
+* Service version
+* Runtime name and version
+* Framework name
+* APM agent name and version
+
+**Container information**
+
+* Operating system
+* Containerized (yes or no)
+* Total number of instances
+* Orchestration
+
+**Cloud provider information**
+
+* Cloud provider
+* Cloud service name
+* Availability zones
+* Machine types
+* Project ID
+* Region
+
+**Serverless information**
+
+* Function name(s)
+* Event trigger type
+
+**Alerts**
+
+* Recently fired alerts
+
diff --git a/docs/en/serverless/apm/apm-ui-services.mdx b/docs/en/serverless/apm/apm-ui-services.mdx
new file mode 100644
index 0000000000..b703e61780
--- /dev/null
+++ b/docs/en/serverless/apm/apm-ui-services.mdx
@@ -0,0 +1,66 @@
+---
+id: serverlessObservabilityApmServices
+slug: /serverless/observability/apm-services
+title: Services
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+import FeatureBeta from '../partials/feature-beta.mdx'
+
+The **Services** inventory provides a quick, high-level overview of the health and general
+performance of all instrumented services.
+
+To help surface potential issues, services are sorted by their health status:
+**critical** → **warning** → **healthy** → **unknown**.
+Health status is powered by machine learning
+and requires anomaly detection to be enabled.
+
+In addition to health status, active alerts for each service are prominently displayed in the service inventory table. Selecting an active alert badge brings you to the **Alerts** tab where you can learn more about the active alert and take action.
+
+![Example view of services table the Applications UI](images/services/apm-services-overview.png)
+
+## Service groups
+
+import Roles from '../partials/roles.mdx'
+
+
+
+
+
+Group services together to build meaningful views that remove noise, simplify investigations across services,
+and combine related alerts.
+
+{/* This screenshot is reused in the alerts docs */}
+{/* Ensure it has an active alert showing */}
+![Example view of service group in the Applications UI](images/services/apm-service-group.png)
+
+To create a service group:
+
+1. In your ((observability)) project, go to **Applications** → **Services**.
+1. Switch to **Service groups**.
+1. Click **Create group**.
+1. Specify a name, color, and description.
+1. Click **Select services**.
+1. Specify a [Kibana Query Language (KQL)](((kibana-ref))/kuery-query.html) query to select
+ services for the group. Services that match the query within the last 24 hours will be assigned to the group.
+
+
+
+Once a service group has been saved, the list of services within it is static.
+If a newly added service matches the KQL query, it will not be automatically added to the service group.
+Similarly, if a service stops matching the KQL query, it will not be removed from the group.
+
+To update the list of services within a group,
+edit the service group, click **Refresh** next to the KQL query, and click **Save group**.
+
+
+
+### Examples
+
+Not sure where to get started? Here are some sample queries you can build from:
+
+* **Group services by environment**: To group "production" services, use `service.environment : "production"`.
+* **Group services by name**: To group all services that end in "beat", use `service.name : *beat`. This will match services named "Auditbeat", "Heartbeat", "Filebeat", and so on.
diff --git a/docs/en/serverless/apm/apm-ui-trace-sample-timeline.mdx b/docs/en/serverless/apm/apm-ui-trace-sample-timeline.mdx
new file mode 100644
index 0000000000..be06a53d4a
--- /dev/null
+++ b/docs/en/serverless/apm/apm-ui-trace-sample-timeline.mdx
@@ -0,0 +1,76 @@
+---
+id: serverlessObservabilityApmTraceSampleTimeline
+slug: /serverless/observability/apm-trace-sample-timeline
+title: Trace sample timeline
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+The trace sample timeline visualization is a high-level view of what your application was doing while it was trying to respond to a request.
+This makes it useful for visualizing where a selected transaction spent most of its time.
+
+![Example view of transactions sample](images/transactions/apm-transaction-sample.png)
+
+View a span in detail by clicking on it in the timeline waterfall.
+For example, when you click on an SQL Select database query,
+the information displayed includes the actual SQL that was executed, how long it took,
+and the percentage of the trace's total time.
+You also get a stack trace, which shows the SQL query in your code.
+Finally, APM knows which files are your code and which are just modules or libraries that you've installed.
+These library frames will be minimized by default in order to show you the most relevant stack trace.
+
+
+A [span](((apm-guide-ref))/data-model-spans.html) is the duration of a single event.
+Spans are automatically captured by APM agents, and you can also define custom spans.
+Each span has a type and is defined by a different color in the timeline/waterfall visualization.
+
+
+![Example view of a span detail in the Applications UI](images/spans/apm-span-detail.png)
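+
+As an illustration of a custom span (this sketch assumes the Python ((apm-agent)); the names and types used are made up), spans are recorded within an active transaction:
+
+```python
+import elasticapm
+from elasticapm import capture_span
+
+client = elasticapm.Client(service_name="my-service")
+client.begin_transaction("request")
+
+# Spans are only recorded while a transaction is active.
+with capture_span("select-orders", span_type="db", span_subtype="postgresql"):
+    orders = [1, 2, 3]  # stand-in for the real query
+
+client.end_transaction("GET /orders", "success")
+```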
+
+## Investigate
+
+The trace sample timeline features an **Investigate** button which provides a quick way to jump
+to other areas of the Elastic Observability UI while maintaining the context of the currently selected trace sample.
+For example, quickly view:
+
+* logs and metrics for the selected pod
+* logs and metrics for the selected host
+* trace logs for the selected `trace.id`
+* uptime status of the selected domain
+* the service map filtered by the selected trace
+* the selected transaction in **Discover**
+* your custom links
+
+## Distributed tracing
+
+When a trace travels through multiple services it is known as a _distributed trace_.
+In the Applications UI, the colors in a distributed trace represent different services and
+are listed in the order they occur.
+
+![Example of distributed trace colors in the Applications UI](images/spans/apm-services-trace.png)
+
+As application architectures are shifting from monolithic to more distributed, service-based architectures,
+distributed tracing has become a crucial feature of modern application performance monitoring.
+It allows you to trace requests through your service architecture automatically, and visualize those traces in one single view in the Applications UI.
+From initial web requests to your front-end service, to queries made to your back-end services,
+this makes finding possible bottlenecks throughout your application much easier and faster.
+
+![Example view of the distributed tracing in the Applications UI](images/spans/apm-distributed-tracing.png)
+
+Don't forget: by definition, a distributed trace includes more than one transaction.
+When viewing distributed traces in the timeline waterfall,
+you'll see an icon that indicates the next transaction in the trace.
+For easier problem isolation, transactions can be collapsed in the waterfall by clicking
+the icon to the left of the transactions.
+Transactions can also be expanded and viewed in detail by clicking on them.
+
+After exploring these traces,
+you can return to the full trace by clicking **View full trace**.
+
+
+Distributed tracing is supported by all APM agents, and there's no additional configuration needed.
+
+
diff --git a/docs/en/serverless/apm/apm-ui-traces.mdx b/docs/en/serverless/apm/apm-ui-traces.mdx
new file mode 100644
index 0000000000..7b52277cd0
--- /dev/null
+++ b/docs/en/serverless/apm/apm-ui-traces.mdx
@@ -0,0 +1,41 @@
+---
+id: serverlessObservabilityApmTraces
+slug: /serverless/observability/apm-traces
+title: Traces
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+
+
+Traces link together related transactions to show the end-to-end performance of how a request was served
+and which services were part of it.
+In addition to the Traces overview, you can view your application traces in the trace sample timeline waterfall.
+
+
+**Traces** displays your application's entry (root) transactions.
+Transactions with the same name are grouped together and only shown once in this table.
+If you're using distributed tracing,
+this view is key to finding the critical paths within your application.
+
+By default, transactions are sorted by _Impact_.
+Impact helps show the most used and slowest endpoints in your service — in other words,
+it's the collective amount of pain a specific endpoint is causing your users.
+If there's a particular endpoint you're worried about, select it to view its
+transaction details.
+
+You can also use queries to filter and search the transactions shown on this page. Note that only properties available on root transactions are searchable. For example, you can't search for `label.tier: 'high'`, as that field is only available on non-root transactions.
+
+![Example view of the Traces overview in the Applications UI](images/traces/apm-traces.png)
+
+## Trace explorer
+
+{/* */}
+**Trace explorer** is an experimental top-level search tool that allows you to query your traces using [Kibana Query Language (KQL)](((kibana-ref))/kuery-query.html) or [Event Query Language (EQL)](((ref))/eql.html).
+
+Curate your own custom queries, or use the **Service Map** to find and select edges to automatically generate queries based on your selection:
+
+![Trace explorer](images/traces/trace-explorer.png)
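+
+For example, a hypothetical KQL query (the service name and latency threshold are made up) that surfaces slow traces from a single service might look like this:
+
+```txt
+service.name : "checkout-service" and transaction.duration.us > 1000000
+```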
+
diff --git a/docs/en/serverless/apm/apm-ui-transactions.mdx b/docs/en/serverless/apm/apm-ui-transactions.mdx
new file mode 100644
index 0000000000..d304798abb
--- /dev/null
+++ b/docs/en/serverless/apm/apm-ui-transactions.mdx
@@ -0,0 +1,179 @@
+---
+id: serverlessObservabilityApmTransactions
+slug: /serverless/observability/apm-transactions
+title: Transactions
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+import LogOverview from '../transclusion/kibana/logs/log-overview.mdx'
+
+A _transaction_ describes an event captured by an Elastic APM agent instrumenting a service.
+APM agents automatically collect performance metrics on HTTP requests, database queries, and much more.
+The **Transactions** tab shows an overview of all transactions.
+
+![Example view of transactions table in the Applications UI](images/transactions/apm-transactions-overview.png)
+
+The **Latency**, **Throughput**, **Failed transaction rate**, **Time spent by span type**, and **Cold start rate**
+charts display information on all transactions associated with the selected service:
+
+
+ **Latency**
+
+ Response times for the service. Options include average, 95th, and 99th percentile.
+ If there's a weird spike that you'd like to investigate,
+ you can simply zoom in on the graph — this will adjust the specific time range,
+ and all of the data on the page will update accordingly.
+
+ **Throughput**
+
+ Visualize response codes: `2xx`, `3xx`, `4xx`, and so on.
+ Useful for determining if more responses than usual are being served with a particular response code.
+ Like in the latency graph, you can zoom in on anomalies to further investigate them.
+
+ **Failed transaction rate**
+
+ The failed transaction rate represents the percentage of failed transactions from the perspective of the selected service.
+ It's useful for visualizing unexpected increases, decreases, or irregular patterns in a service's transactions.
+
+
+
+ HTTP **transactions** from the HTTP server perspective do not consider a `4xx` status code (client error) as a failure
+ because the failure was caused by the caller, not the HTTP server. Thus, `event.outcome=success` and there will be no increase in failed transaction rate.
+
+ HTTP **spans** from the client perspective however, are considered failures if the HTTP status code is ≥ 400.
+ These spans will set `event.outcome=failure` and increase the failed transaction rate.
+
+ If there is no HTTP status, both transactions and spans are considered successful unless an error is reported.
+
+
+
+ **Time spent by span type**
+
+ Visualize where your application is spending most of its time.
+ For example, is your app spending time in external calls, database processing, or application code execution?
+
+ The time a transaction took to complete is also recorded and displayed on the chart under the "app" label.
+ "app" indicates that something was happening within the application, but we're not sure exactly what.
+ This could be a sign that the APM agent does not have auto-instrumentation for whatever was happening during that time.
+
+ It's important to note that if you have asynchronous spans, the sum of all span times may exceed the duration of the transaction.
+
+ **Cold start rate**
+
+ Only applicable to serverless transactions, this chart displays the percentage of requests that trigger a cold start of a serverless function.
+ See Cold starts for more information.
+
+
+
+## Transactions table
+
+The **Transactions** table displays a list of _transaction groups_ for the selected service.
+In other words, this view groups all transactions of the same name together,
+and only displays one entry for each group.
+
+![Example view of the transactions table in the Applications UI](images/transactions/apm-transactions-table.png)
+
+By default, transaction groups are sorted by _Impact_.
+Impact helps show the most used and slowest endpoints in your service — in other words,
+it's the collective amount of pain a specific endpoint is causing your users.
+If there's a particular endpoint you're worried about, you can click on it to view the transaction details.
+
+
+
+If you only see one route in the Transactions table, or if you have transactions named "unknown route",
+it could be a symptom that the APM agent either wasn't installed correctly or doesn't support your framework.
+
+For further details, including troubleshooting and custom implementation instructions,
+refer to the documentation for each APM Agent you've implemented.
+
+
+
+
+
+## Transaction details
+
+Selecting a transaction group will bring you to the **transaction** details.
+This page is visually similar to the transaction overview, but it shows data from all transactions within
+the selected transaction group.
+
+![Example view of transactions table in the Applications UI](images/transactions/apm-transactions-overview.png)
+
+
+
+### Latency distribution
+
+The latency distribution shows a plot of all transaction durations for the given time period.
+The following screenshot shows a typical distribution
+and indicates most of our requests were served quickly — awesome!
+The requests on the right are taking longer than average; we probably need to focus on them.
+
+![Example view of latency distribution graph](images/transactions/apm-transaction-duration-dist.png)
+
+Click and drag to select a latency duration _bucket_ to display up to 500 trace samples.
+
+
+
+### Trace samples
+
+Trace samples are based on the _bucket_ selection in the **Latency distribution** chart;
+update the samples by selecting a new _bucket_.
+The number of requests per bucket is displayed when hovering over the graph,
+and the selected bucket is highlighted to stand out.
+
+Each bucket presents up to ten trace samples in a **timeline**, trace sample **metadata**,
+and any related **logs**.
+
+**Trace sample timeline**
+
+Each sample has a trace timeline waterfall that shows how a typical request in that bucket executed.
+This waterfall is useful for understanding the parent/child hierarchy of transactions and spans,
+and ultimately determining _why_ a request was slow.
+For large waterfalls, expand problematic transactions and collapse well-performing ones
+for easier problem isolation and troubleshooting.
+
+![Example view of transactions sample](images/transactions/apm-transaction-sample.png)
+
+
+More information on timeline waterfalls is available in spans.
+
+
+**Trace sample metadata**
+
+Learn more about a trace sample in the **Metadata** tab:
+
+* Labels: Custom labels added by APM agents
+* HTTP request/response information
+* Host information
+* Container information
+* Service: The service/application runtime, APM agent, name, etc.
+* Process: The process ID that served up the request.
+* APM agent information
+* URL
+* User: Requires additional configuration, but allows you to see which user experienced the current transaction.
+* FaaS information, like cold start, AWS request ID, trigger type, and trigger request ID
+
+
+All of this data is stored in documents in Elasticsearch.
+This means you can select "Actions - View transaction in Discover" to see the actual Elasticsearch document under the **Discover** tab.
+
+
+**Trace sample logs**
+
+The **Logs** tab displays logs related to the sampled trace.
+
+
+
+![APM logs tab](images/transactions/apm-logs-tab.png)
+
+
+
+### Correlations
+
+Correlations surface attributes of your data that are potentially correlated with high-latency or erroneous transactions.
+To learn more, see Find transaction latency and failure correlations.
+
+![APM latency correlations](images/transactions/correlations-hover.png)
+
diff --git a/docs/en/serverless/apm/apm-view-and-analyze-traces.mdx b/docs/en/serverless/apm/apm-view-and-analyze-traces.mdx
new file mode 100644
index 0000000000..ff7bca15e2
--- /dev/null
+++ b/docs/en/serverless/apm/apm-view-and-analyze-traces.mdx
@@ -0,0 +1,27 @@
+---
+id: serverlessObservabilityApmViewAndAnalyzeTraces
+slug: /serverless/observability/apm-view-and-analyze-traces
+title: View and analyze traces
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'overview' ]
+---
+
+
+
+APM allows you to monitor your software services and applications in real time:
+visualize detailed performance information on your services,
+identify and analyze errors,
+and monitor host-level and APM agent-specific metrics like JVM and Go runtime metrics.
+
+## Visualizing application bottlenecks
+
+Having access to application-level insights with just a few clicks can drastically decrease the time you spend
+debugging errors, slow response times, and crashes.
+
+For example, you can see information about response times, requests per minute, and status codes per endpoint.
+You can even dive into a specific request sample and get a complete waterfall view of what your application is spending its time on.
+You might see that your bottlenecks are in database queries, cache calls, or external requests.
+For each incoming request and each application error,
+you can also see contextual information such as the request header, user information,
+system values, or custom data that you manually attached to the request.
+
diff --git a/docs/en/serverless/apm/apm.mdx b/docs/en/serverless/apm/apm.mdx
new file mode 100644
index 0000000000..8d8680f2cc
--- /dev/null
+++ b/docs/en/serverless/apm/apm.mdx
@@ -0,0 +1,28 @@
+---
+id: serverlessObservabilityApm
+slug: /serverless/observability/apm
+title: Application performance monitoring (APM)
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'overview' ]
+---
+
+
+
+Elastic APM is an application performance monitoring system.
+It allows you to monitor software services and applications in real time, by
+collecting detailed performance information on response time for incoming requests,
+database queries, calls to caches, external HTTP requests, and more.
+This makes it easy to pinpoint and fix performance problems quickly.
+
+Elastic APM also automatically collects unhandled errors and exceptions.
+Errors are grouped based primarily on the stack trace,
+so you can identify new errors as they appear and keep an eye on how many times specific errors happen.
+
+Metrics are another vital source of information when debugging production systems.
+Elastic APM agents automatically pick up basic host-level metrics and agent-specific metrics,
+like JVM metrics in the Java Agent, and Go runtime metrics in the Go Agent.
+
+## Give Elastic APM a try
+
+Ready to give Elastic APM a try? See Get started with traces and APM.
+
diff --git a/docs/en/serverless/apm/images/advanced-queries/advanced-discover.png b/docs/en/serverless/apm/images/advanced-queries/advanced-discover.png
new file mode 100644
index 0000000000..5291526783
Binary files /dev/null and b/docs/en/serverless/apm/images/advanced-queries/advanced-discover.png differ
diff --git a/docs/en/serverless/apm/images/advanced-queries/apm-query-bar.png b/docs/en/serverless/apm/images/advanced-queries/apm-query-bar.png
new file mode 100644
index 0000000000..457573f485
Binary files /dev/null and b/docs/en/serverless/apm/images/advanced-queries/apm-query-bar.png differ
diff --git a/docs/en/serverless/apm/images/advanced-queries/specific-transaction-search.png b/docs/en/serverless/apm/images/advanced-queries/specific-transaction-search.png
new file mode 100644
index 0000000000..4ed548f015
Binary files /dev/null and b/docs/en/serverless/apm/images/advanced-queries/specific-transaction-search.png differ
diff --git a/docs/en/serverless/apm/images/advanced-queries/specific-transaction.png b/docs/en/serverless/apm/images/advanced-queries/specific-transaction.png
new file mode 100644
index 0000000000..52073bf765
Binary files /dev/null and b/docs/en/serverless/apm/images/advanced-queries/specific-transaction.png differ
diff --git a/docs/en/serverless/apm/images/annotations/apm-transaction-annotation.png b/docs/en/serverless/apm/images/annotations/apm-transaction-annotation.png
new file mode 100644
index 0000000000..b9360db2ff
Binary files /dev/null and b/docs/en/serverless/apm/images/annotations/apm-transaction-annotation.png differ
diff --git a/docs/en/serverless/apm/images/apm-lambda/lambda-overview.png b/docs/en/serverless/apm/images/apm-lambda/lambda-overview.png
new file mode 100644
index 0000000000..9d0558949f
Binary files /dev/null and b/docs/en/serverless/apm/images/apm-lambda/lambda-overview.png differ
diff --git a/docs/en/serverless/apm/images/correlations/correlations-failed-transactions.png b/docs/en/serverless/apm/images/correlations/correlations-failed-transactions.png
new file mode 100644
index 0000000000..19221e751e
Binary files /dev/null and b/docs/en/serverless/apm/images/correlations/correlations-failed-transactions.png differ
diff --git a/docs/en/serverless/apm/images/custom-links/create-github-issue.png b/docs/en/serverless/apm/images/custom-links/create-github-issue.png
new file mode 100644
index 0000000000..81ea4e5e78
Binary files /dev/null and b/docs/en/serverless/apm/images/custom-links/create-github-issue.png differ
diff --git a/docs/en/serverless/apm/images/custom-links/create-jira-issue.png b/docs/en/serverless/apm/images/custom-links/create-jira-issue.png
new file mode 100644
index 0000000000..962c98df3f
Binary files /dev/null and b/docs/en/serverless/apm/images/custom-links/create-jira-issue.png differ
diff --git a/docs/en/serverless/apm/images/custom-links/example-metadata.png b/docs/en/serverless/apm/images/custom-links/example-metadata.png
new file mode 100644
index 0000000000..2a5bda7f08
Binary files /dev/null and b/docs/en/serverless/apm/images/custom-links/example-metadata.png differ
diff --git a/docs/en/serverless/apm/images/dependencies/dependencies-drilldown.png b/docs/en/serverless/apm/images/dependencies/dependencies-drilldown.png
new file mode 100644
index 0000000000..af82ee3d93
Binary files /dev/null and b/docs/en/serverless/apm/images/dependencies/dependencies-drilldown.png differ
diff --git a/docs/en/serverless/apm/images/dependencies/dependencies.png b/docs/en/serverless/apm/images/dependencies/dependencies.png
new file mode 100644
index 0000000000..543908274e
Binary files /dev/null and b/docs/en/serverless/apm/images/dependencies/dependencies.png differ
diff --git a/docs/en/serverless/apm/images/dependencies/operations-detail.png b/docs/en/serverless/apm/images/dependencies/operations-detail.png
new file mode 100644
index 0000000000..64a1c65508
Binary files /dev/null and b/docs/en/serverless/apm/images/dependencies/operations-detail.png differ
diff --git a/docs/en/serverless/apm/images/dependencies/operations.png b/docs/en/serverless/apm/images/dependencies/operations.png
new file mode 100644
index 0000000000..119f8bdf99
Binary files /dev/null and b/docs/en/serverless/apm/images/dependencies/operations.png differ
diff --git a/docs/en/serverless/apm/images/distributed-tracing/dt-trace-ex1.png b/docs/en/serverless/apm/images/distributed-tracing/dt-trace-ex1.png
new file mode 100644
index 0000000000..ca97955ee8
Binary files /dev/null and b/docs/en/serverless/apm/images/distributed-tracing/dt-trace-ex1.png differ
diff --git a/docs/en/serverless/apm/images/distributed-tracing/dt-trace-ex2.png b/docs/en/serverless/apm/images/distributed-tracing/dt-trace-ex2.png
new file mode 100644
index 0000000000..3df0827f58
Binary files /dev/null and b/docs/en/serverless/apm/images/distributed-tracing/dt-trace-ex2.png differ
diff --git a/docs/en/serverless/apm/images/distributed-tracing/dt-trace-ex3.png b/docs/en/serverless/apm/images/distributed-tracing/dt-trace-ex3.png
new file mode 100644
index 0000000000..1bb666b030
Binary files /dev/null and b/docs/en/serverless/apm/images/distributed-tracing/dt-trace-ex3.png differ
diff --git a/docs/en/serverless/apm/images/errors/apm-error-group.png b/docs/en/serverless/apm/images/errors/apm-error-group.png
new file mode 100644
index 0000000000..22bceb9d81
Binary files /dev/null and b/docs/en/serverless/apm/images/errors/apm-error-group.png differ
diff --git a/docs/en/serverless/apm/images/errors/apm-errors-overview.png b/docs/en/serverless/apm/images/errors/apm-errors-overview.png
new file mode 100644
index 0000000000..c390b7ddc0
Binary files /dev/null and b/docs/en/serverless/apm/images/errors/apm-errors-overview.png differ
diff --git a/docs/en/serverless/apm/images/filters/global-filters.png b/docs/en/serverless/apm/images/filters/global-filters.png
new file mode 100644
index 0000000000..dc9b36277a
Binary files /dev/null and b/docs/en/serverless/apm/images/filters/global-filters.png differ
diff --git a/docs/en/serverless/apm/images/infrastructure/infra.png b/docs/en/serverless/apm/images/infrastructure/infra.png
new file mode 100644
index 0000000000..e139012270
Binary files /dev/null and b/docs/en/serverless/apm/images/infrastructure/infra.png differ
diff --git a/docs/en/serverless/apm/images/logs/logs.png b/docs/en/serverless/apm/images/logs/logs.png
new file mode 100644
index 0000000000..94d77b4749
Binary files /dev/null and b/docs/en/serverless/apm/images/logs/logs.png differ
diff --git a/docs/en/serverless/apm/images/metrics/apm-metrics.png b/docs/en/serverless/apm/images/metrics/apm-metrics.png
new file mode 100644
index 0000000000..c2d609c7c4
Binary files /dev/null and b/docs/en/serverless/apm/images/metrics/apm-metrics.png differ
diff --git a/docs/en/serverless/apm/images/metrics/jvm-metrics-overview.png b/docs/en/serverless/apm/images/metrics/jvm-metrics-overview.png
new file mode 100644
index 0000000000..c6f28f7bdf
Binary files /dev/null and b/docs/en/serverless/apm/images/metrics/jvm-metrics-overview.png differ
diff --git a/docs/en/serverless/apm/images/metrics/jvm-metrics.png b/docs/en/serverless/apm/images/metrics/jvm-metrics.png
new file mode 100644
index 0000000000..70f7965b72
Binary files /dev/null and b/docs/en/serverless/apm/images/metrics/jvm-metrics.png differ
diff --git a/docs/en/serverless/apm/images/service-maps/green-service.png b/docs/en/serverless/apm/images/service-maps/green-service.png
new file mode 100644
index 0000000000..bbc00a3543
Binary files /dev/null and b/docs/en/serverless/apm/images/service-maps/green-service.png differ
diff --git a/docs/en/serverless/apm/images/service-maps/red-service.png b/docs/en/serverless/apm/images/service-maps/red-service.png
new file mode 100644
index 0000000000..be7a62b177
Binary files /dev/null and b/docs/en/serverless/apm/images/service-maps/red-service.png differ
diff --git a/docs/en/serverless/apm/images/service-maps/service-map-anomaly.png b/docs/en/serverless/apm/images/service-maps/service-map-anomaly.png
new file mode 100644
index 0000000000..cd59f86690
Binary files /dev/null and b/docs/en/serverless/apm/images/service-maps/service-map-anomaly.png differ
diff --git a/docs/en/serverless/apm/images/service-maps/service-maps-java.png b/docs/en/serverless/apm/images/service-maps/service-maps-java.png
new file mode 100644
index 0000000000..aa8e5dc505
Binary files /dev/null and b/docs/en/serverless/apm/images/service-maps/service-maps-java.png differ
diff --git a/docs/en/serverless/apm/images/service-maps/yellow-service.png b/docs/en/serverless/apm/images/service-maps/yellow-service.png
new file mode 100644
index 0000000000..43afd6250b
Binary files /dev/null and b/docs/en/serverless/apm/images/service-maps/yellow-service.png differ
diff --git a/docs/en/serverless/apm/images/services/all-instances.png b/docs/en/serverless/apm/images/services/all-instances.png
new file mode 100644
index 0000000000..70028b5a9b
Binary files /dev/null and b/docs/en/serverless/apm/images/services/all-instances.png differ
diff --git a/docs/en/serverless/apm/images/services/apm-service-group.png b/docs/en/serverless/apm/images/services/apm-service-group.png
new file mode 100644
index 0000000000..44a0191411
Binary files /dev/null and b/docs/en/serverless/apm/images/services/apm-service-group.png differ
diff --git a/docs/en/serverless/apm/images/services/apm-services-overview.png b/docs/en/serverless/apm/images/services/apm-services-overview.png
new file mode 100644
index 0000000000..0badeea3be
Binary files /dev/null and b/docs/en/serverless/apm/images/services/apm-services-overview.png differ
diff --git a/docs/en/serverless/apm/images/services/error-rate.png b/docs/en/serverless/apm/images/services/error-rate.png
new file mode 100644
index 0000000000..845fa2af07
Binary files /dev/null and b/docs/en/serverless/apm/images/services/error-rate.png differ
diff --git a/docs/en/serverless/apm/images/services/latency.png b/docs/en/serverless/apm/images/services/latency.png
new file mode 100644
index 0000000000..1c220c1a4b
Binary files /dev/null and b/docs/en/serverless/apm/images/services/latency.png differ
diff --git a/docs/en/serverless/apm/images/services/metadata-icons.png b/docs/en/serverless/apm/images/services/metadata-icons.png
new file mode 100644
index 0000000000..0509e9c12a
Binary files /dev/null and b/docs/en/serverless/apm/images/services/metadata-icons.png differ
diff --git a/docs/en/serverless/apm/images/services/time-series-expected-bounds-comparison.png b/docs/en/serverless/apm/images/services/time-series-expected-bounds-comparison.png
new file mode 100644
index 0000000000..6e705064e6
Binary files /dev/null and b/docs/en/serverless/apm/images/services/time-series-expected-bounds-comparison.png differ
diff --git a/docs/en/serverless/apm/images/spans/apm-distributed-tracing.png b/docs/en/serverless/apm/images/spans/apm-distributed-tracing.png
new file mode 100644
index 0000000000..4d1b8cde20
Binary files /dev/null and b/docs/en/serverless/apm/images/spans/apm-distributed-tracing.png differ
diff --git a/docs/en/serverless/apm/images/spans/apm-services-trace.png b/docs/en/serverless/apm/images/spans/apm-services-trace.png
new file mode 100644
index 0000000000..083c69318e
Binary files /dev/null and b/docs/en/serverless/apm/images/spans/apm-services-trace.png differ
diff --git a/docs/en/serverless/apm/images/spans/apm-span-detail.png b/docs/en/serverless/apm/images/spans/apm-span-detail.png
new file mode 100644
index 0000000000..d0b6a4de3d
Binary files /dev/null and b/docs/en/serverless/apm/images/spans/apm-span-detail.png differ
diff --git a/docs/en/serverless/apm/images/traces/apm-traces.png b/docs/en/serverless/apm/images/traces/apm-traces.png
new file mode 100644
index 0000000000..c8b8d40b01
Binary files /dev/null and b/docs/en/serverless/apm/images/traces/apm-traces.png differ
diff --git a/docs/en/serverless/apm/images/traces/trace-explorer.png b/docs/en/serverless/apm/images/traces/trace-explorer.png
new file mode 100644
index 0000000000..70c13f650e
Binary files /dev/null and b/docs/en/serverless/apm/images/traces/trace-explorer.png differ
diff --git a/docs/en/serverless/apm/images/transactions/apm-logs-tab.png b/docs/en/serverless/apm/images/transactions/apm-logs-tab.png
new file mode 100644
index 0000000000..c79be8b5eb
Binary files /dev/null and b/docs/en/serverless/apm/images/transactions/apm-logs-tab.png differ
diff --git a/docs/en/serverless/apm/images/transactions/apm-transaction-duration-dist.png b/docs/en/serverless/apm/images/transactions/apm-transaction-duration-dist.png
new file mode 100644
index 0000000000..9c7ab5dd67
Binary files /dev/null and b/docs/en/serverless/apm/images/transactions/apm-transaction-duration-dist.png differ
diff --git a/docs/en/serverless/apm/images/transactions/apm-transaction-sample.png b/docs/en/serverless/apm/images/transactions/apm-transaction-sample.png
new file mode 100644
index 0000000000..a9490fc20d
Binary files /dev/null and b/docs/en/serverless/apm/images/transactions/apm-transaction-sample.png differ
diff --git a/docs/en/serverless/apm/images/transactions/apm-transactions-overview.png b/docs/en/serverless/apm/images/transactions/apm-transactions-overview.png
new file mode 100644
index 0000000000..34cd0219b8
Binary files /dev/null and b/docs/en/serverless/apm/images/transactions/apm-transactions-overview.png differ
diff --git a/docs/en/serverless/apm/images/transactions/apm-transactions-table.png b/docs/en/serverless/apm/images/transactions/apm-transactions-table.png
new file mode 100644
index 0000000000..8a3415bc9a
Binary files /dev/null and b/docs/en/serverless/apm/images/transactions/apm-transactions-table.png differ
diff --git a/docs/en/serverless/apm/images/transactions/correlations-hover.png b/docs/en/serverless/apm/images/transactions/correlations-hover.png
new file mode 100644
index 0000000000..9731517b32
Binary files /dev/null and b/docs/en/serverless/apm/images/transactions/correlations-hover.png differ
diff --git a/docs/en/serverless/cases/cases.mdx b/docs/en/serverless/cases/cases.mdx
new file mode 100644
index 0000000000..982c17407f
--- /dev/null
+++ b/docs/en/serverless/cases/cases.mdx
@@ -0,0 +1,18 @@
+---
+id: serverlessObservabilityCases
+slug: /serverless/observability/cases
+title: Cases
+description: Use cases to track progress toward solving problems detected in Elastic Observability.
+tags: [ 'serverless', 'observability', 'overview' ]
+---
+
+
+
+Collect and share information about observability issues by creating a case.
+Cases allow you to track key investigation details,
+add assignees and tags to your cases, set their severity and status, and add alerts,
+comments, and visualizations. You can also send cases to third-party systems by
+configuring external connectors.
+
+![Cases page](../images/cases.png)
+{/* NOTE: This is an autogenerated screenshot. Do not edit it directly. */}
\ No newline at end of file
diff --git a/docs/en/serverless/cases/create-manage-cases.mdx b/docs/en/serverless/cases/create-manage-cases.mdx
new file mode 100644
index 0000000000..c114ffde67
--- /dev/null
+++ b/docs/en/serverless/cases/create-manage-cases.mdx
@@ -0,0 +1,107 @@
+---
+id: serverlessObservabilityCreateANewCase
+slug: /serverless/observability/create-a-new-case
+title: Create and manage cases
+description: Learn how to create a case, add files, and manage the case over time.
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+import Roles from '../partials/roles.mdx'
+
+
+
+Open a new case to keep track of issues and share the details with colleagues.
+To create a case in your Observability project:
+
+1. In your ((observability)) project, go to **Cases**.
+1. Click **Create case**.
+1. Give the case a name, severity, and description.
+
+
+ In the `Description` area, you can use
+ [Markdown](https://www.markdownguide.org/cheat-sheet) syntax to create formatted text.
+
+
+1. (Optional) Add a category, assignees, and tags.
+ {/* To do: Need to verify that a viewer cannot be assigned to a case
+ (all I know is that they can _view_ the case) */}
+ You can add users who are assigned the Editor user role (or a more permissive role) for the project.
+
+1. (Optional) Under External incident management system, you can select a connector to send cases to an external system.
+ If you've created any connectors previously, they will be listed here.
+ If there are no connectors listed, you can create one.
+
+1. After you've completed all of the required fields, click **Create case**.
+
+
+You can also create a case from an alert or add an alert to an existing case. From the **Alerts** page, click the **More options** icon and choose either **Add to existing case** or **Create new case**, and select or complete the details as required.
+
+
+## Add files
+
+After you create a case, you can upload and manage files on the **Files** tab:
+
+![A list of files attached to a case](../images/cases-files-tab.png)
+{/* NOTE: This is an autogenerated screenshot. Do not edit it directly. */}
+
+To download or delete the file or copy the file hash to your clipboard, open the action menu (…).
+The available hash functions are MD5, SHA-1, and SHA-256.
+
+When you upload a file, a comment is added to the case activity log.
+To view an image, click its name in the activity or file list.
+
+
+Uploaded files are also accessible under **Project settings** → **Management** → **Files**.
+When you export cases as [saved objects](((kibana-ref))/managing-saved-objects.html), the case files are not exported.
+
+
+You can add images and text, CSV, JSON, PDF, or ZIP files.
+For the complete list, check [`mime_types.ts`](https://github.com/elastic/kibana/blob/main/x-pack/plugins/cases/common/constants/mime_types.ts).
+
+
+There is a 10 MiB size limit for images. For all other MIME types, the limit is 100 MiB.
+
+
+{/*
+
+NOTE: Email notifications are not available in Observability projects yet.
+
+## Add email notifications
+
+You can configure email notifications that occur when users are assigned to
+cases.
+
+To do this, add the email addresses to the monitoring email allowlist.
+Follow the steps in [Send alerts by email](((cloud))/ec-watcher.html#ec-watcher-allowlist).
+
+You do not need to configure an email connector or update
+user settings, since the preconfigured Elastic-Cloud-SMTP connector is
+used by default.
+
+When you subsequently add assignees to cases, they receive an email.
+
+*/}
+
+## Manage existing cases
+
+You can search existing cases and filter them by attributes such as assignees,
+categories, severity, status, and tags. You can also select multiple cases and use bulk
+actions to delete cases or change their attributes.
+
+To view a case, click on its name. You can then:
+
+* Add a new comment.
+* Edit existing comments and the description.
+* Add or remove assignees.
+* Add a connector (if you did not select one while creating the case).
+* Send updates to external systems (if external connections are configured).
+* Edit the category and tags.
+* Change the status.
+* Change the severity.
+* Remove an alert.
+* Refresh the case to retrieve the latest updates.
+* Close the case.
+* Reopen a closed case.
+
diff --git a/docs/en/serverless/cases/send-cases-to-an-external-system.mdx b/docs/en/serverless/cases/send-cases-to-an-external-system.mdx
new file mode 100644
index 0000000000..9daf88cade
--- /dev/null
+++ b/docs/en/serverless/cases/send-cases-to-an-external-system.mdx
@@ -0,0 +1,87 @@
+---
+id: serverlessObservabilitySendCasesToAnExternalSystem
+slug: /serverless/observability/send-cases-to-an-external-system
+title: Send cases to an external system
+description: Connectors allow you track Elastic Observability cases in external systems.
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+import Roles from '../partials/roles.mdx'
+
+
+
+If you are using an external incident management system, you can integrate Elastic Observability
+cases with this system using connectors. These third-party systems are supported:
+
+* ((ibm-r))
+* ((jira)) (including ((jira)) Service Desk)
+* ((sn-itsm))
+* ((sn-sir))
+* ((swimlane))
+* ((webhook-cm))
+
+To send cases, you need to create a connector, which stores the information required to interact
+with the external system. For each case, you can send the title, description, and comments when
+you choose to push the case. For the **Webhook - Case Management** connector, you can also send
+the status and severity fields.
+
+
+{/* TODO: Verify user roles needed to create connectors... */}
+To add, modify, or delete a connector, you must have the Admin user role for the project
+(or a more permissive role).
+
+
+After creating a connector, you can set your cases to
+automatically close when they are sent to an external system.
+
+## Create a connector
+
+1. In your ((observability)) project, go to **Cases** → **Settings**.
+1. From the **Incident management system** list, select **Add new connector**.
+1. Select the system to send cases to: **((sn))**, **((jira))**, **((ibm-r))**, **((swimlane))**,
+ or **((webhook-cm))**.
+
+ ![Add a connector to send cases to an external source](../images/cases-add-connector.png)
+
+1. Enter your required settings. For connector configuration details, refer to:
+ - [((ibm-r)) connector](((kibana-ref))/resilient-action-type.html)
+ - [((jira)) connector](((kibana-ref))/jira-action-type.html)
+ - [((sn-itsm)) connector](((kibana-ref))/servicenow-action-type.html)
+ - [((sn-sir)) connector](((kibana-ref))/servicenow-sir-action-type.html)
+ - [((swimlane)) connector](((kibana-ref))/swimlane-action-type.html)
+ - [((webhook-cm)) connector](((kibana-ref))/cases-webhook-action-type.html)
+
+ {/* Should we be linking out to kibana docs for this info? */}
+
+1. Click **Save**.
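+
+If you prefer to create connectors programmatically, you can also use the Kibana actions API.
+The following is a minimal sketch, assuming a ((jira)) connector; the project URL, API key,
+((jira)) URL, project key, and credentials are placeholders, and the exact fields depend on the
+connector type you choose:
+
+```bash
+# Hypothetical example: create a Jira connector with the actions API.
+# Replace every placeholder value before running.
+curl -X POST "${KIBANA_URL}/api/actions/connector" \
+  -H "Authorization: ApiKey ${API_KEY}" \
+  -H "Content-Type: application/json" \
+  -H "kbn-xsrf: true" \
+  -d '{
+    "name": "my-jira-connector",
+    "connector_type_id": ".jira",
+    "config": {
+      "apiUrl": "https://example.atlassian.net",
+      "projectKey": "OBS"
+    },
+    "secrets": {
+      "email": "user@example.com",
+      "apiToken": "<jira-api-token>"
+    }
+  }'
+```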
+
+## Edit a connector
+
+You can create additional connectors, update existing connectors, and change the connector used to send cases to external systems.
+
+
+You can also configure which connector is used for each case individually. Refer to .
+
+
+To change the default connector used to send cases to external systems:
+
+1. Go to **Cases** → **Settings**.
+1. Select the required connector from the **Incident management system** list.
+
+To update an existing connector:
+
+1. Click **Update \**.
+1. Update the connector fields as required.
+
+## Send and close cases
+
+To send a case to an external system, click the **Push as \ incident** button on the individual case page.
+This information is not sent automatically. If you make further changes to the shared case fields, you should push the case again.
+
+If you close cases in your external incident management system, the cases will remain open in Elastic Observability until you close them
+manually (the information is only sent in one direction).
+
+To close cases when they are sent to an external system, select
+**Automatically close cases when pushing new incident to external system** from **Cases** → **Settings**.
diff --git a/docs/en/serverless/dashboards/dashboards-and-visualizations.mdx b/docs/en/serverless/dashboards/dashboards-and-visualizations.mdx
new file mode 100644
index 0000000000..8d96ea6c79
--- /dev/null
+++ b/docs/en/serverless/dashboards/dashboards-and-visualizations.mdx
@@ -0,0 +1,45 @@
+---
+id: serverlessObservabilityDashboards
+slug: /serverless/observability/dashboards
+title: Dashboards
+description: Visualize your observability data using pre-built dashboards or create your own.
+tags: [ 'serverless', 'observability', 'overview' ]
+---
+
+
+
+Elastic provides a wide range of pre-built dashboards for visualizing observability data from a variety of sources.
+These dashboards are loaded automatically when you install [Elastic integrations](https://docs.elastic.co/integrations).
+
+You can also create new dashboards and visualizations based on your data views to get a full picture of your data.
+
+In your Observability project, go to **Dashboards** to see installed dashboards or create your own.
+This example shows dashboards loaded by the System integration:
+
+![Screenshot showing list of System dashboards](../images/dashboards.png)
+
+You can filter the list of dashboards in the following ways:
+
+* Use the text search field to filter by name or description.
+* Use the **Tags** menu to filter by tag. To create a new tag or edit existing tags, click **Manage tags**.
+* Click a dashboard's tags to toggle filtering for each tag.
+
+## Create new dashboards
+
+To create a new dashboard, click **Create Dashboard** and begin adding visualizations.
+You can create charts, graphs, maps, tables, and other types of visualizations from your data,
+or you can add visualizations from the library.
+
+You can also add other types of panels, such as filters, links, and text, and add
+controls like time sliders.
+
+For more information about creating dashboards,
+refer to [Create your first dashboard](((kibana-ref))/create-a-dashboard-of-panels-with-web-server-data.html).
+
+
+ The tutorial about creating your first dashboard is written for ((kib)) users,
+ but the steps for serverless are very similar.
+ To load the sample data in serverless, go to **Project Settings** → **Integrations** in the navigation pane,
+ then search for "sample data".
+
+
diff --git a/docs/en/serverless/images/action-alert-summary.png b/docs/en/serverless/images/action-alert-summary.png
new file mode 100644
index 0000000000..89157d6629
Binary files /dev/null and b/docs/en/serverless/images/action-alert-summary.png differ
diff --git a/docs/en/serverless/images/action-variables-popup.png b/docs/en/serverless/images/action-variables-popup.png
new file mode 100644
index 0000000000..ffd3723cb3
Binary files /dev/null and b/docs/en/serverless/images/action-variables-popup.png differ
diff --git a/docs/en/serverless/images/add-custom-metric.png b/docs/en/serverless/images/add-custom-metric.png
new file mode 100644
index 0000000000..c807ab2f7b
Binary files /dev/null and b/docs/en/serverless/images/add-custom-metric.png differ
diff --git a/docs/en/serverless/images/ai-assistant-button.png b/docs/en/serverless/images/ai-assistant-button.png
new file mode 100644
index 0000000000..4adcd34a48
Binary files /dev/null and b/docs/en/serverless/images/ai-assistant-button.png differ
diff --git a/docs/en/serverless/images/ai-assistant-chat.png b/docs/en/serverless/images/ai-assistant-chat.png
new file mode 100644
index 0000000000..80cc50b4e7
Binary files /dev/null and b/docs/en/serverless/images/ai-assistant-chat.png differ
diff --git a/docs/en/serverless/images/ai-assistant-logs-prompts.png b/docs/en/serverless/images/ai-assistant-logs-prompts.png
new file mode 100644
index 0000000000..86316298b8
Binary files /dev/null and b/docs/en/serverless/images/ai-assistant-logs-prompts.png differ
diff --git a/docs/en/serverless/images/ai-assistant-logs.png b/docs/en/serverless/images/ai-assistant-logs.png
new file mode 100644
index 0000000000..2de48a6e6c
Binary files /dev/null and b/docs/en/serverless/images/ai-assistant-logs.png differ
diff --git a/docs/en/serverless/images/ai-assistant-overview.gif b/docs/en/serverless/images/ai-assistant-overview.gif
new file mode 100644
index 0000000000..7393bd53c9
Binary files /dev/null and b/docs/en/serverless/images/ai-assistant-overview.gif differ
diff --git a/docs/en/serverless/images/alert-action-frequency.png b/docs/en/serverless/images/alert-action-frequency.png
new file mode 100644
index 0000000000..2936ed2247
Binary files /dev/null and b/docs/en/serverless/images/alert-action-frequency.png differ
diff --git a/docs/en/serverless/images/alert-action-types.png b/docs/en/serverless/images/alert-action-types.png
new file mode 100644
index 0000000000..e38f98b3bb
Binary files /dev/null and b/docs/en/serverless/images/alert-action-types.png differ
diff --git a/docs/en/serverless/images/alert-anomaly-action-frequency-recovered.png b/docs/en/serverless/images/alert-anomaly-action-frequency-recovered.png
new file mode 100644
index 0000000000..82aa544123
Binary files /dev/null and b/docs/en/serverless/images/alert-anomaly-action-frequency-recovered.png differ
diff --git a/docs/en/serverless/images/alert-apm-action-frequency-recovered.png b/docs/en/serverless/images/alert-apm-action-frequency-recovered.png
new file mode 100644
index 0000000000..ae25379628
Binary files /dev/null and b/docs/en/serverless/images/alert-apm-action-frequency-recovered.png differ
diff --git a/docs/en/serverless/images/alert-details-flyout.png b/docs/en/serverless/images/alert-details-flyout.png
new file mode 100644
index 0000000000..362ed053bc
Binary files /dev/null and b/docs/en/serverless/images/alert-details-flyout.png differ
diff --git a/docs/en/serverless/images/alert-preview.png b/docs/en/serverless/images/alert-preview.png
new file mode 100644
index 0000000000..aac5279d0e
Binary files /dev/null and b/docs/en/serverless/images/alert-preview.png differ
diff --git a/docs/en/serverless/images/alerting-es-query-rule-action-query-matched.png b/docs/en/serverless/images/alerting-es-query-rule-action-query-matched.png
new file mode 100644
index 0000000000..cafa6e82e2
Binary files /dev/null and b/docs/en/serverless/images/alerting-es-query-rule-action-query-matched.png differ
diff --git a/docs/en/serverless/images/alerting-es-query-rule-action-summary.png b/docs/en/serverless/images/alerting-es-query-rule-action-summary.png
new file mode 100644
index 0000000000..1e098d77fc
Binary files /dev/null and b/docs/en/serverless/images/alerting-es-query-rule-action-summary.png differ
diff --git a/docs/en/serverless/images/alerting-rule-types-es-query-conditions.png b/docs/en/serverless/images/alerting-rule-types-es-query-conditions.png
new file mode 100644
index 0000000000..2151709ada
Binary files /dev/null and b/docs/en/serverless/images/alerting-rule-types-es-query-conditions.png differ
diff --git a/docs/en/serverless/images/alerting-rule-types-es-query-example-action-variable.png b/docs/en/serverless/images/alerting-rule-types-es-query-example-action-variable.png
new file mode 100644
index 0000000000..8cb5c07543
Binary files /dev/null and b/docs/en/serverless/images/alerting-rule-types-es-query-example-action-variable.png differ
diff --git a/docs/en/serverless/images/alerting-rule-types-es-query-invalid.png b/docs/en/serverless/images/alerting-rule-types-es-query-invalid.png
new file mode 100644
index 0000000000..0d36e33741
Binary files /dev/null and b/docs/en/serverless/images/alerting-rule-types-es-query-invalid.png differ
diff --git a/docs/en/serverless/images/alerting-rule-types-es-query-valid.png b/docs/en/serverless/images/alerting-rule-types-es-query-valid.png
new file mode 100644
index 0000000000..6c63f777df
Binary files /dev/null and b/docs/en/serverless/images/alerting-rule-types-es-query-valid.png differ
diff --git a/docs/en/serverless/images/alerting-rule-types-esql-query-valid.png b/docs/en/serverless/images/alerting-rule-types-esql-query-valid.png
new file mode 100644
index 0000000000..59f3fdfc22
Binary files /dev/null and b/docs/en/serverless/images/alerting-rule-types-esql-query-valid.png differ
diff --git a/docs/en/serverless/images/alerts-create-apm-anomaly.png b/docs/en/serverless/images/alerts-create-apm-anomaly.png
new file mode 100644
index 0000000000..10c9661093
Binary files /dev/null and b/docs/en/serverless/images/alerts-create-apm-anomaly.png differ
diff --git a/docs/en/serverless/images/alerts-create-rule-apm-latency-threshold.png b/docs/en/serverless/images/alerts-create-rule-apm-latency-threshold.png
new file mode 100644
index 0000000000..3841f736e1
Binary files /dev/null and b/docs/en/serverless/images/alerts-create-rule-apm-latency-threshold.png differ
diff --git a/docs/en/serverless/images/alerts-create-rule-error-count.png b/docs/en/serverless/images/alerts-create-rule-error-count.png
new file mode 100644
index 0000000000..94d3621c97
Binary files /dev/null and b/docs/en/serverless/images/alerts-create-rule-error-count.png differ
diff --git a/docs/en/serverless/images/alerts-create-rule-failed-transaction-rate.png b/docs/en/serverless/images/alerts-create-rule-failed-transaction-rate.png
new file mode 100644
index 0000000000..14f12518be
Binary files /dev/null and b/docs/en/serverless/images/alerts-create-rule-failed-transaction-rate.png differ
diff --git a/docs/en/serverless/images/alerts-detail-apm-anomaly.png b/docs/en/serverless/images/alerts-detail-apm-anomaly.png
new file mode 100644
index 0000000000..17b5345efc
Binary files /dev/null and b/docs/en/serverless/images/alerts-detail-apm-anomaly.png differ
diff --git a/docs/en/serverless/images/alerts-edit-rule.png b/docs/en/serverless/images/alerts-edit-rule.png
new file mode 100644
index 0000000000..062e6771e1
Binary files /dev/null and b/docs/en/serverless/images/alerts-edit-rule.png differ
diff --git a/docs/en/serverless/images/alerts-rules-logs.png b/docs/en/serverless/images/alerts-rules-logs.png
new file mode 100644
index 0000000000..dbaea790b4
Binary files /dev/null and b/docs/en/serverless/images/alerts-rules-logs.png differ
diff --git a/docs/en/serverless/images/anomalies-overlay.png b/docs/en/serverless/images/anomalies-overlay.png
new file mode 100644
index 0000000000..096de18ceb
Binary files /dev/null and b/docs/en/serverless/images/anomalies-overlay.png differ
diff --git a/docs/en/serverless/images/anomaly-detection-alert.png b/docs/en/serverless/images/anomaly-detection-alert.png
new file mode 100644
index 0000000000..acf67aa4e0
Binary files /dev/null and b/docs/en/serverless/images/anomaly-detection-alert.png differ
diff --git a/docs/en/serverless/images/anomaly-detection-configure-job-rules.png b/docs/en/serverless/images/anomaly-detection-configure-job-rules.png
new file mode 100644
index 0000000000..0094a82dae
Binary files /dev/null and b/docs/en/serverless/images/anomaly-detection-configure-job-rules.png differ
diff --git a/docs/en/serverless/images/anomaly-detection-create-calendar.png b/docs/en/serverless/images/anomaly-detection-create-calendar.png
new file mode 100644
index 0000000000..30c0feaec2
Binary files /dev/null and b/docs/en/serverless/images/anomaly-detection-create-calendar.png differ
diff --git a/docs/en/serverless/images/anomaly-detection-create-filter-list.png b/docs/en/serverless/images/anomaly-detection-create-filter-list.png
new file mode 100644
index 0000000000..226491d7ea
Binary files /dev/null and b/docs/en/serverless/images/anomaly-detection-create-filter-list.png differ
diff --git a/docs/en/serverless/images/anomaly-detection-custom-url.png b/docs/en/serverless/images/anomaly-detection-custom-url.png
new file mode 100644
index 0000000000..4914589ce1
Binary files /dev/null and b/docs/en/serverless/images/anomaly-detection-custom-url.png differ
diff --git a/docs/en/serverless/images/anomaly-detection-details.png b/docs/en/serverless/images/anomaly-detection-details.png
new file mode 100644
index 0000000000..7cf20c4c44
Binary files /dev/null and b/docs/en/serverless/images/anomaly-detection-details.png differ
diff --git a/docs/en/serverless/images/anomaly-detection-forecast.png b/docs/en/serverless/images/anomaly-detection-forecast.png
new file mode 100644
index 0000000000..c745c7285a
Binary files /dev/null and b/docs/en/serverless/images/anomaly-detection-forecast.png differ
diff --git a/docs/en/serverless/images/anomaly-detection-multi-metric-details.png b/docs/en/serverless/images/anomaly-detection-multi-metric-details.png
new file mode 100644
index 0000000000..2e91875bcd
Binary files /dev/null and b/docs/en/serverless/images/anomaly-detection-multi-metric-details.png differ
diff --git a/docs/en/serverless/images/anomaly-detection-single-metric-viewer.png b/docs/en/serverless/images/anomaly-detection-single-metric-viewer.png
new file mode 100644
index 0000000000..b89e5e0181
Binary files /dev/null and b/docs/en/serverless/images/anomaly-detection-single-metric-viewer.png differ
diff --git a/docs/en/serverless/images/anomaly-explorer.png b/docs/en/serverless/images/anomaly-explorer.png
new file mode 100644
index 0000000000..dd46c11cfa
Binary files /dev/null and b/docs/en/serverless/images/anomaly-explorer.png differ
diff --git a/docs/en/serverless/images/apm-agents-aws-lambda-functions-architecture.png b/docs/en/serverless/images/apm-agents-aws-lambda-functions-architecture.png
new file mode 100644
index 0000000000..22de0b5c6a
Binary files /dev/null and b/docs/en/serverless/images/apm-agents-aws-lambda-functions-architecture.png differ
diff --git a/docs/en/serverless/images/apm-alert.png b/docs/en/serverless/images/apm-alert.png
new file mode 100644
index 0000000000..92b6f5dde9
Binary files /dev/null and b/docs/en/serverless/images/apm-alert.png differ
diff --git a/docs/en/serverless/images/apm-dt-sampling-example-1.png b/docs/en/serverless/images/apm-dt-sampling-example-1.png
new file mode 100644
index 0000000000..a3def0c7bf
Binary files /dev/null and b/docs/en/serverless/images/apm-dt-sampling-example-1.png differ
diff --git a/docs/en/serverless/images/apm-dt-sampling-example-2.png b/docs/en/serverless/images/apm-dt-sampling-example-2.png
new file mode 100644
index 0000000000..d7f87bcd89
Binary files /dev/null and b/docs/en/serverless/images/apm-dt-sampling-example-2.png differ
diff --git a/docs/en/serverless/images/cases-add-connector.png b/docs/en/serverless/images/cases-add-connector.png
new file mode 100644
index 0000000000..8a344bae93
Binary files /dev/null and b/docs/en/serverless/images/cases-add-connector.png differ
diff --git a/docs/en/serverless/images/cases-files-tab.png b/docs/en/serverless/images/cases-files-tab.png
new file mode 100644
index 0000000000..a11287b060
Binary files /dev/null and b/docs/en/serverless/images/cases-files-tab.png differ
diff --git a/docs/en/serverless/images/cases.png b/docs/en/serverless/images/cases.png
new file mode 100644
index 0000000000..b8b11abdad
Binary files /dev/null and b/docs/en/serverless/images/cases.png differ
diff --git a/docs/en/serverless/images/change-point-detection-attach-charts.png b/docs/en/serverless/images/change-point-detection-attach-charts.png
new file mode 100644
index 0000000000..9609d7ebd3
Binary files /dev/null and b/docs/en/serverless/images/change-point-detection-attach-charts.png differ
diff --git a/docs/en/serverless/images/change-point-detection-filter-by-type.png b/docs/en/serverless/images/change-point-detection-filter-by-type.png
new file mode 100644
index 0000000000..093980424d
Binary files /dev/null and b/docs/en/serverless/images/change-point-detection-filter-by-type.png differ
diff --git a/docs/en/serverless/images/change-point-detection-view-selected.png b/docs/en/serverless/images/change-point-detection-view-selected.png
new file mode 100644
index 0000000000..409e7f5158
Binary files /dev/null and b/docs/en/serverless/images/change-point-detection-view-selected.png differ
diff --git a/docs/en/serverless/images/change-point-detection.png b/docs/en/serverless/images/change-point-detection.png
new file mode 100644
index 0000000000..4e6afccff9
Binary files /dev/null and b/docs/en/serverless/images/change-point-detection.png differ
diff --git a/docs/en/serverless/images/conditional-alerts.png b/docs/en/serverless/images/conditional-alerts.png
new file mode 100644
index 0000000000..7e9d32fd20
Binary files /dev/null and b/docs/en/serverless/images/conditional-alerts.png differ
diff --git a/docs/en/serverless/images/custom-logs-advanced-options.png b/docs/en/serverless/images/custom-logs-advanced-options.png
new file mode 100644
index 0000000000..cc1bc3d0f5
Binary files /dev/null and b/docs/en/serverless/images/custom-logs-advanced-options.png differ
diff --git a/docs/en/serverless/images/custom-threshold-rule.png b/docs/en/serverless/images/custom-threshold-rule.png
new file mode 100644
index 0000000000..4290d3a426
Binary files /dev/null and b/docs/en/serverless/images/custom-threshold-rule.png differ
diff --git a/docs/en/serverless/images/custom-threshold-run-when.png b/docs/en/serverless/images/custom-threshold-run-when.png
new file mode 100644
index 0000000000..6fee51ccfd
Binary files /dev/null and b/docs/en/serverless/images/custom-threshold-run-when.png differ
diff --git a/docs/en/serverless/images/dashboards.png b/docs/en/serverless/images/dashboards.png
new file mode 100644
index 0000000000..b709f2a291
Binary files /dev/null and b/docs/en/serverless/images/dashboards.png differ
diff --git a/docs/en/serverless/images/es-query-rule-action-summary.png b/docs/en/serverless/images/es-query-rule-action-summary.png
new file mode 100644
index 0000000000..1e098d77fc
Binary files /dev/null and b/docs/en/serverless/images/es-query-rule-action-summary.png differ
diff --git a/docs/en/serverless/images/expand-icon.png b/docs/en/serverless/images/expand-icon.png
new file mode 100644
index 0000000000..c69ffcc559
Binary files /dev/null and b/docs/en/serverless/images/expand-icon.png differ
diff --git a/docs/en/serverless/images/hosts-dashed-and-missing.png b/docs/en/serverless/images/hosts-dashed-and-missing.png
new file mode 100644
index 0000000000..856e50e4b4
Binary files /dev/null and b/docs/en/serverless/images/hosts-dashed-and-missing.png differ
diff --git a/docs/en/serverless/images/hosts-dashed.png b/docs/en/serverless/images/hosts-dashed.png
new file mode 100644
index 0000000000..5034d23d01
Binary files /dev/null and b/docs/en/serverless/images/hosts-dashed.png differ
diff --git a/docs/en/serverless/images/hosts-inspect.png b/docs/en/serverless/images/hosts-inspect.png
new file mode 100644
index 0000000000..f71598bd2d
Binary files /dev/null and b/docs/en/serverless/images/hosts-inspect.png differ
diff --git a/docs/en/serverless/images/hosts-logs.png b/docs/en/serverless/images/hosts-logs.png
new file mode 100644
index 0000000000..3d846415bb
Binary files /dev/null and b/docs/en/serverless/images/hosts-logs.png differ
diff --git a/docs/en/serverless/images/hosts-missing-data.png b/docs/en/serverless/images/hosts-missing-data.png
new file mode 100644
index 0000000000..a57be2d66a
Binary files /dev/null and b/docs/en/serverless/images/hosts-missing-data.png differ
diff --git a/docs/en/serverless/images/hosts-open-in-lens.png b/docs/en/serverless/images/hosts-open-in-lens.png
new file mode 100644
index 0000000000..cd37372e80
Binary files /dev/null and b/docs/en/serverless/images/hosts-open-in-lens.png differ
diff --git a/docs/en/serverless/images/hosts-view-alerts.png b/docs/en/serverless/images/hosts-view-alerts.png
new file mode 100644
index 0000000000..3b3e18eab6
Binary files /dev/null and b/docs/en/serverless/images/hosts-view-alerts.png differ
diff --git a/docs/en/serverless/images/hosts.png b/docs/en/serverless/images/hosts.png
new file mode 100644
index 0000000000..479ba32124
Binary files /dev/null and b/docs/en/serverless/images/hosts.png differ
diff --git a/docs/en/serverless/images/inventory-alert.png b/docs/en/serverless/images/inventory-alert.png
new file mode 100644
index 0000000000..a0508106d5
Binary files /dev/null and b/docs/en/serverless/images/inventory-alert.png differ
diff --git a/docs/en/serverless/images/inventory-threshold-run-when-selection.png b/docs/en/serverless/images/inventory-threshold-run-when-selection.png
new file mode 100644
index 0000000000..f9cfa2c854
Binary files /dev/null and b/docs/en/serverless/images/inventory-threshold-run-when-selection.png differ
diff --git a/docs/en/serverless/images/kubernetes-filter.png b/docs/en/serverless/images/kubernetes-filter.png
new file mode 100644
index 0000000000..06b9f03f6e
Binary files /dev/null and b/docs/en/serverless/images/kubernetes-filter.png differ
diff --git a/docs/en/serverless/images/log-copy-es-endpoint.png b/docs/en/serverless/images/log-copy-es-endpoint.png
new file mode 100644
index 0000000000..0a52a33989
Binary files /dev/null and b/docs/en/serverless/images/log-copy-es-endpoint.png differ
diff --git a/docs/en/serverless/images/log-explorer-overview.png b/docs/en/serverless/images/log-explorer-overview.png
new file mode 100644
index 0000000000..41448ef2ca
Binary files /dev/null and b/docs/en/serverless/images/log-explorer-overview.png differ
diff --git a/docs/en/serverless/images/log-explorer-select-syslogs.png b/docs/en/serverless/images/log-explorer-select-syslogs.png
new file mode 100644
index 0000000000..b49e1665d5
Binary files /dev/null and b/docs/en/serverless/images/log-explorer-select-syslogs.png differ
diff --git a/docs/en/serverless/images/log-explorer.png b/docs/en/serverless/images/log-explorer.png
new file mode 100644
index 0000000000..92b9b2bb96
Binary files /dev/null and b/docs/en/serverless/images/log-explorer.png differ
diff --git a/docs/en/serverless/images/log-help-icon.png b/docs/en/serverless/images/log-help-icon.png
new file mode 100644
index 0000000000..e33d8b59a4
Binary files /dev/null and b/docs/en/serverless/images/log-help-icon.png differ
diff --git a/docs/en/serverless/images/log-menu.png b/docs/en/serverless/images/log-menu.png
new file mode 100644
index 0000000000..db61571ca2
Binary files /dev/null and b/docs/en/serverless/images/log-menu.png differ
diff --git a/docs/en/serverless/images/log-pattern-analysis.png b/docs/en/serverless/images/log-pattern-analysis.png
new file mode 100644
index 0000000000..fa6eecc8e7
Binary files /dev/null and b/docs/en/serverless/images/log-pattern-analysis.png differ
diff --git a/docs/en/serverless/images/log-rate-analysis-results.png b/docs/en/serverless/images/log-rate-analysis-results.png
new file mode 100644
index 0000000000..8027968858
Binary files /dev/null and b/docs/en/serverless/images/log-rate-analysis-results.png differ
diff --git a/docs/en/serverless/images/log-rate-analysis.png b/docs/en/serverless/images/log-rate-analysis.png
new file mode 100644
index 0000000000..15855c8b9a
Binary files /dev/null and b/docs/en/serverless/images/log-rate-analysis.png differ
diff --git a/docs/en/serverless/images/log-rate-histogram.png b/docs/en/serverless/images/log-rate-histogram.png
new file mode 100644
index 0000000000..436ceafeae
Binary files /dev/null and b/docs/en/serverless/images/log-rate-histogram.png differ
diff --git a/docs/en/serverless/images/log-threshold-breach.png b/docs/en/serverless/images/log-threshold-breach.png
new file mode 100644
index 0000000000..200ddfb875
Binary files /dev/null and b/docs/en/serverless/images/log-threshold-breach.png differ
diff --git a/docs/en/serverless/images/logs-end-date.png b/docs/en/serverless/images/logs-end-date.png
new file mode 100644
index 0000000000..1d2932cd09
Binary files /dev/null and b/docs/en/serverless/images/logs-end-date.png differ
diff --git a/docs/en/serverless/images/logs-kql-filter.png b/docs/en/serverless/images/logs-kql-filter.png
new file mode 100644
index 0000000000..6eab1d1710
Binary files /dev/null and b/docs/en/serverless/images/logs-kql-filter.png differ
diff --git a/docs/en/serverless/images/logs-overlay.png b/docs/en/serverless/images/logs-overlay.png
new file mode 100644
index 0000000000..8b2e538cb2
Binary files /dev/null and b/docs/en/serverless/images/logs-overlay.png differ
diff --git a/docs/en/serverless/images/logs-start-date.png b/docs/en/serverless/images/logs-start-date.png
new file mode 100644
index 0000000000..6d8f8c6dd7
Binary files /dev/null and b/docs/en/serverless/images/logs-start-date.png differ
diff --git a/docs/en/serverless/images/logs-stream-logs-api-key-beats.png b/docs/en/serverless/images/logs-stream-logs-api-key-beats.png
new file mode 100644
index 0000000000..a080b23e8e
Binary files /dev/null and b/docs/en/serverless/images/logs-stream-logs-api-key-beats.png differ
diff --git a/docs/en/serverless/images/logs-stream-logs-config.png b/docs/en/serverless/images/logs-stream-logs-config.png
new file mode 100644
index 0000000000..4ef18a221d
Binary files /dev/null and b/docs/en/serverless/images/logs-stream-logs-config.png differ
diff --git a/docs/en/serverless/images/logs-stream-logs-service-name.png b/docs/en/serverless/images/logs-stream-logs-service-name.png
new file mode 100644
index 0000000000..c48b639679
Binary files /dev/null and b/docs/en/serverless/images/logs-stream-logs-service-name.png differ
diff --git a/docs/en/serverless/images/logs-threshold-conditional-alert.png b/docs/en/serverless/images/logs-threshold-conditional-alert.png
new file mode 100644
index 0000000000..73f6bfc5bc
Binary files /dev/null and b/docs/en/serverless/images/logs-threshold-conditional-alert.png differ
diff --git a/docs/en/serverless/images/metadata-overlay.png b/docs/en/serverless/images/metadata-overlay.png
new file mode 100644
index 0000000000..71a61f565e
Binary files /dev/null and b/docs/en/serverless/images/metadata-overlay.png differ
diff --git a/docs/en/serverless/images/metrics-app.png b/docs/en/serverless/images/metrics-app.png
new file mode 100644
index 0000000000..63975743d5
Binary files /dev/null and b/docs/en/serverless/images/metrics-app.png differ
diff --git a/docs/en/serverless/images/metrics-history-chart.png b/docs/en/serverless/images/metrics-history-chart.png
new file mode 100644
index 0000000000..cc62326f9f
Binary files /dev/null and b/docs/en/serverless/images/metrics-history-chart.png differ
diff --git a/docs/en/serverless/images/metrics-ml-jobs.png b/docs/en/serverless/images/metrics-ml-jobs.png
new file mode 100644
index 0000000000..052984f98d
Binary files /dev/null and b/docs/en/serverless/images/metrics-ml-jobs.png differ
diff --git a/docs/en/serverless/images/metrics-overlay.png b/docs/en/serverless/images/metrics-overlay.png
new file mode 100644
index 0000000000..4d40b434d4
Binary files /dev/null and b/docs/en/serverless/images/metrics-overlay.png differ
diff --git a/docs/en/serverless/images/observability-action-alert-summary.png b/docs/en/serverless/images/observability-action-alert-summary.png
new file mode 100644
index 0000000000..89157d6629
Binary files /dev/null and b/docs/en/serverless/images/observability-action-alert-summary.png differ
diff --git a/docs/en/serverless/images/observability-alerts-overview.png b/docs/en/serverless/images/observability-alerts-overview.png
new file mode 100644
index 0000000000..b7dd3aba62
Binary files /dev/null and b/docs/en/serverless/images/observability-alerts-overview.png differ
diff --git a/docs/en/serverless/images/observability-alerts-view.png b/docs/en/serverless/images/observability-alerts-view.png
new file mode 100644
index 0000000000..eeaa164cbc
Binary files /dev/null and b/docs/en/serverless/images/observability-alerts-view.png differ
diff --git a/docs/en/serverless/images/pod-metrics.png b/docs/en/serverless/images/pod-metrics.png
new file mode 100644
index 0000000000..3dc367a5a6
Binary files /dev/null and b/docs/en/serverless/images/pod-metrics.png differ
diff --git a/docs/en/serverless/images/private-locations-monitor-locations.png b/docs/en/serverless/images/private-locations-monitor-locations.png
new file mode 100644
index 0000000000..fc4dad9414
Binary files /dev/null and b/docs/en/serverless/images/private-locations-monitor-locations.png differ
diff --git a/docs/en/serverless/images/processes-overlay.png b/docs/en/serverless/images/processes-overlay.png
new file mode 100644
index 0000000000..eefb054392
Binary files /dev/null and b/docs/en/serverless/images/processes-overlay.png differ
diff --git a/docs/en/serverless/images/run-log-pattern-analysis.png b/docs/en/serverless/images/run-log-pattern-analysis.png
new file mode 100644
index 0000000000..eb4e9ae908
Binary files /dev/null and b/docs/en/serverless/images/run-log-pattern-analysis.png differ
diff --git a/docs/en/serverless/images/serverless-capabilities.svg b/docs/en/serverless/images/serverless-capabilities.svg
new file mode 100644
index 0000000000..559a5aae29
--- /dev/null
+++ b/docs/en/serverless/images/serverless-capabilities.svg
@@ -0,0 +1,20 @@
+
diff --git a/docs/en/serverless/images/services-inventory.png b/docs/en/serverless/images/services-inventory.png
new file mode 100644
index 0000000000..d32dd4e71d
Binary files /dev/null and b/docs/en/serverless/images/services-inventory.png differ
diff --git a/docs/en/serverless/images/slo-action-frequency.png b/docs/en/serverless/images/slo-action-frequency.png
new file mode 100644
index 0000000000..cef8d4307e
Binary files /dev/null and b/docs/en/serverless/images/slo-action-frequency.png differ
diff --git a/docs/en/serverless/images/slo-alerts-create-rule.png b/docs/en/serverless/images/slo-alerts-create-rule.png
new file mode 100644
index 0000000000..e3c6a8ce1f
Binary files /dev/null and b/docs/en/serverless/images/slo-alerts-create-rule.png differ
diff --git a/docs/en/serverless/images/slo-burn-rate-breach.png b/docs/en/serverless/images/slo-burn-rate-breach.png
new file mode 100644
index 0000000000..cdedd2d722
Binary files /dev/null and b/docs/en/serverless/images/slo-burn-rate-breach.png differ
diff --git a/docs/en/serverless/images/slo-dashboard-panel.png b/docs/en/serverless/images/slo-dashboard-panel.png
new file mode 100644
index 0000000000..87c330d10f
Binary files /dev/null and b/docs/en/serverless/images/slo-dashboard-panel.png differ
diff --git a/docs/en/serverless/images/slo-dashboard.png b/docs/en/serverless/images/slo-dashboard.png
new file mode 100644
index 0000000000..6c08697da5
Binary files /dev/null and b/docs/en/serverless/images/slo-dashboard.png differ
diff --git a/docs/en/serverless/images/slo-detailed-view.png b/docs/en/serverless/images/slo-detailed-view.png
new file mode 100644
index 0000000000..3cfb080a3d
Binary files /dev/null and b/docs/en/serverless/images/slo-detailed-view.png differ
diff --git a/docs/en/serverless/images/slo-filtering-options.png b/docs/en/serverless/images/slo-filtering-options.png
new file mode 100644
index 0000000000..a4580a933e
Binary files /dev/null and b/docs/en/serverless/images/slo-filtering-options.png differ
diff --git a/docs/en/serverless/images/slo-group-by.png b/docs/en/serverless/images/slo-group-by.png
new file mode 100644
index 0000000000..d13fdb9a4d
Binary files /dev/null and b/docs/en/serverless/images/slo-group-by.png differ
diff --git a/docs/en/serverless/images/synthetic-monitor-lifecycle.png b/docs/en/serverless/images/synthetic-monitor-lifecycle.png
new file mode 100644
index 0000000000..711fb1a6f0
Binary files /dev/null and b/docs/en/serverless/images/synthetic-monitor-lifecycle.png differ
diff --git a/docs/en/serverless/images/synthetics-analyze-individual-monitor-details.png b/docs/en/serverless/images/synthetics-analyze-individual-monitor-details.png
new file mode 100644
index 0000000000..08349dabf5
Binary files /dev/null and b/docs/en/serverless/images/synthetics-analyze-individual-monitor-details.png differ
diff --git a/docs/en/serverless/images/synthetics-analyze-individual-monitor-errors.png b/docs/en/serverless/images/synthetics-analyze-individual-monitor-errors.png
new file mode 100644
index 0000000000..3ee9d9b99b
Binary files /dev/null and b/docs/en/serverless/images/synthetics-analyze-individual-monitor-errors.png differ
diff --git a/docs/en/serverless/images/synthetics-analyze-individual-monitor-header.png b/docs/en/serverless/images/synthetics-analyze-individual-monitor-header.png
new file mode 100644
index 0000000000..0e8d3a6d8c
Binary files /dev/null and b/docs/en/serverless/images/synthetics-analyze-individual-monitor-header.png differ
diff --git a/docs/en/serverless/images/synthetics-analyze-individual-monitor-history.png b/docs/en/serverless/images/synthetics-analyze-individual-monitor-history.png
new file mode 100644
index 0000000000..7253ff3840
Binary files /dev/null and b/docs/en/serverless/images/synthetics-analyze-individual-monitor-history.png differ
diff --git a/docs/en/serverless/images/synthetics-analyze-journeys-over-time.png b/docs/en/serverless/images/synthetics-analyze-journeys-over-time.png
new file mode 100644
index 0000000000..b9acdffd23
Binary files /dev/null and b/docs/en/serverless/images/synthetics-analyze-journeys-over-time.png differ
diff --git a/docs/en/serverless/images/synthetics-analyze-one-run-code-executed.png b/docs/en/serverless/images/synthetics-analyze-one-run-code-executed.png
new file mode 100644
index 0000000000..f65f26934d
Binary files /dev/null and b/docs/en/serverless/images/synthetics-analyze-one-run-code-executed.png differ
diff --git a/docs/en/serverless/images/synthetics-analyze-one-run-compare-steps.png b/docs/en/serverless/images/synthetics-analyze-one-run-compare-steps.png
new file mode 100644
index 0000000000..28a663c74b
Binary files /dev/null and b/docs/en/serverless/images/synthetics-analyze-one-run-compare-steps.png differ
diff --git a/docs/en/serverless/images/synthetics-analyze-one-step-metrics.png b/docs/en/serverless/images/synthetics-analyze-one-step-metrics.png
new file mode 100644
index 0000000000..d9d0d1bc83
Binary files /dev/null and b/docs/en/serverless/images/synthetics-analyze-one-step-metrics.png differ
diff --git a/docs/en/serverless/images/synthetics-analyze-one-step-network.png b/docs/en/serverless/images/synthetics-analyze-one-step-network.png
new file mode 100644
index 0000000000..b448e474b1
Binary files /dev/null and b/docs/en/serverless/images/synthetics-analyze-one-step-network.png differ
diff --git a/docs/en/serverless/images/synthetics-analyze-one-step-object.png b/docs/en/serverless/images/synthetics-analyze-one-step-object.png
new file mode 100644
index 0000000000..4846992088
Binary files /dev/null and b/docs/en/serverless/images/synthetics-analyze-one-step-object.png differ
diff --git a/docs/en/serverless/images/synthetics-analyze-one-step-screenshot.png b/docs/en/serverless/images/synthetics-analyze-one-step-screenshot.png
new file mode 100644
index 0000000000..97a687d6f7
Binary files /dev/null and b/docs/en/serverless/images/synthetics-analyze-one-step-screenshot.png differ
diff --git a/docs/en/serverless/images/synthetics-analyze-one-step-timing.png b/docs/en/serverless/images/synthetics-analyze-one-step-timing.png
new file mode 100644
index 0000000000..0bd844384f
Binary files /dev/null and b/docs/en/serverless/images/synthetics-analyze-one-step-timing.png differ
diff --git a/docs/en/serverless/images/synthetics-create-test-script-recorder.png b/docs/en/serverless/images/synthetics-create-test-script-recorder.png
new file mode 100644
index 0000000000..6d488b8276
Binary files /dev/null and b/docs/en/serverless/images/synthetics-create-test-script-recorder.png differ
diff --git a/docs/en/serverless/images/synthetics-get-started-projects.png b/docs/en/serverless/images/synthetics-get-started-projects.png
new file mode 100644
index 0000000000..8632f5708d
Binary files /dev/null and b/docs/en/serverless/images/synthetics-get-started-projects.png differ
diff --git a/docs/en/serverless/images/synthetics-get-started-ui-lightweight.png b/docs/en/serverless/images/synthetics-get-started-ui-lightweight.png
new file mode 100644
index 0000000000..1d1223bea9
Binary files /dev/null and b/docs/en/serverless/images/synthetics-get-started-ui-lightweight.png differ
diff --git a/docs/en/serverless/images/synthetics-get-started-ui.png b/docs/en/serverless/images/synthetics-get-started-ui.png
new file mode 100644
index 0000000000..e94e4ceacd
Binary files /dev/null and b/docs/en/serverless/images/synthetics-get-started-ui.png differ
diff --git a/docs/en/serverless/images/synthetics-monitor-management-api-key.png b/docs/en/serverless/images/synthetics-monitor-management-api-key.png
new file mode 100644
index 0000000000..84956e8e4b
Binary files /dev/null and b/docs/en/serverless/images/synthetics-monitor-management-api-key.png differ
diff --git a/docs/en/serverless/images/synthetics-monitor-page.png b/docs/en/serverless/images/synthetics-monitor-page.png
new file mode 100644
index 0000000000..bda0383851
Binary files /dev/null and b/docs/en/serverless/images/synthetics-monitor-page.png differ
diff --git a/docs/en/serverless/images/synthetics-params-secrets-kibana-define.png b/docs/en/serverless/images/synthetics-params-secrets-kibana-define.png
new file mode 100644
index 0000000000..e07aa48064
Binary files /dev/null and b/docs/en/serverless/images/synthetics-params-secrets-kibana-define.png differ
diff --git a/docs/en/serverless/images/synthetics-params-secrets-kibana-use-browser.png b/docs/en/serverless/images/synthetics-params-secrets-kibana-use-browser.png
new file mode 100644
index 0000000000..2755e7b47d
Binary files /dev/null and b/docs/en/serverless/images/synthetics-params-secrets-kibana-use-browser.png differ
diff --git a/docs/en/serverless/images/synthetics-params-secrets-kibana-use-lightweight.png b/docs/en/serverless/images/synthetics-params-secrets-kibana-use-lightweight.png
new file mode 100644
index 0000000000..52fb703128
Binary files /dev/null and b/docs/en/serverless/images/synthetics-params-secrets-kibana-use-lightweight.png differ
diff --git a/docs/en/serverless/images/synthetics-retest.png b/docs/en/serverless/images/synthetics-retest.png
new file mode 100644
index 0000000000..b55b96615d
Binary files /dev/null and b/docs/en/serverless/images/synthetics-retest.png differ
diff --git a/docs/en/serverless/images/synthetics-settings-alerting.png b/docs/en/serverless/images/synthetics-settings-alerting.png
new file mode 100644
index 0000000000..37e77bdaa9
Binary files /dev/null and b/docs/en/serverless/images/synthetics-settings-alerting.png differ
diff --git a/docs/en/serverless/images/synthetics-settings-api-keys.png b/docs/en/serverless/images/synthetics-settings-api-keys.png
new file mode 100644
index 0000000000..5990883241
Binary files /dev/null and b/docs/en/serverless/images/synthetics-settings-api-keys.png differ
diff --git a/docs/en/serverless/images/synthetics-settings-data-retention.png b/docs/en/serverless/images/synthetics-settings-data-retention.png
new file mode 100644
index 0000000000..6d49753fa9
Binary files /dev/null and b/docs/en/serverless/images/synthetics-settings-data-retention.png differ
diff --git a/docs/en/serverless/images/synthetics-settings-disable-default-rules.png b/docs/en/serverless/images/synthetics-settings-disable-default-rules.png
new file mode 100644
index 0000000000..a58d9a1457
Binary files /dev/null and b/docs/en/serverless/images/synthetics-settings-disable-default-rules.png differ
diff --git a/docs/en/serverless/images/synthetics-settings-global-parameters.png b/docs/en/serverless/images/synthetics-settings-global-parameters.png
new file mode 100644
index 0000000000..830f9a9777
Binary files /dev/null and b/docs/en/serverless/images/synthetics-settings-global-parameters.png differ
diff --git a/docs/en/serverless/images/synthetics-settings-private-locations.png b/docs/en/serverless/images/synthetics-settings-private-locations.png
new file mode 100644
index 0000000000..62ba5f7ecb
Binary files /dev/null and b/docs/en/serverless/images/synthetics-settings-private-locations.png differ
diff --git a/docs/en/serverless/images/synthetics-ui-inline-script.png b/docs/en/serverless/images/synthetics-ui-inline-script.png
new file mode 100644
index 0000000000..d819c4ed72
Binary files /dev/null and b/docs/en/serverless/images/synthetics-ui-inline-script.png differ
diff --git a/docs/en/serverless/images/table-view-icon.png b/docs/en/serverless/images/table-view-icon.png
new file mode 100644
index 0000000000..c761abfb38
Binary files /dev/null and b/docs/en/serverless/images/table-view-icon.png differ
diff --git a/docs/en/serverless/infra-monitoring/analyze-hosts.mdx b/docs/en/serverless/infra-monitoring/analyze-hosts.mdx
new file mode 100644
index 0000000000..4584f07d94
--- /dev/null
+++ b/docs/en/serverless/infra-monitoring/analyze-hosts.mdx
@@ -0,0 +1,268 @@
+---
+id: serverlessObservabilityAnalyzeHosts
+slug: /serverless/observability/analyze-hosts
+title: Analyze and compare hosts
+description: Get a metrics-driven view of your hosts backed by an easy-to-use interface called Lens.
+tags: [ 'serverless', 'observability', 'how to' ]
+---
+
+
+import HostDetails from '../transclusion/host-details.mdx'
+
+
+
+
+We'd love to get your feedback!
+[Tell us what you think!](https://docs.google.com/forms/d/e/1FAIpQLScRHG8TIVb1Oq8ZhD4aks3P1TmgiM58TY123QpDCcBz83YC6w/viewform)
+
+The **Hosts** page provides a metrics-driven view of your infrastructure backed
+by an easy-to-use interface called Lens. On the **Hosts** page, you can view
+health and performance metrics to help you quickly:
+
+* Analyze and compare hosts without having to build new dashboards.
+* Identify which hosts trigger the most alerts.
+* Troubleshoot and resolve issues quickly.
+* View historical data to rule out false alerts and identify root causes.
+* Filter and search the data to focus on the hosts you care about the most.
+
+To access the **Hosts** page, in your ((observability)) project, go to
+**Infrastructure** → **Hosts**.
+
+![Screenshot of the Hosts page](../images/hosts.png)
+
+To learn more about the metrics shown on this page, refer to the documentation.
+
+
+
+If you haven't added data yet, click **Add data** to search for and install an Elastic integration.
+
+Need help getting started? Follow the steps in
+Get started with system metrics.
+
+
+
+The **Hosts** page provides several ways to view host metrics:
+
+* Overview tiles show the number of hosts returned by your search plus
+ averages of key metrics, including CPU usage, memory usage, and throughput.
+
+* The Host limit controls the maximum number of hosts shown on the page. The
+ default is 50, which means the page shows data for the top 50 hosts based on the
+ most recent timestamps. You can increase the host limit to see data for more
+ hosts, but doing so may impact query performance.
+
+* The Hosts table shows a breakdown of metrics for each host along with an alert count
+ for any hosts with active alerts. You may need to page through the list
+ or change the number of rows displayed on each page to see all of your hosts.
+
+* Each host name is an active link to a page,
+ which includes metrics, host metadata, alerts, processes, logs, and anomalies.
+ You can optionally open the host details in an overlay.
+
+* Table columns are sortable, but note that sorting applies only to the data set that has
+  already been returned.
+
+* The tabs at the bottom of the page show an overview of the metrics, logs,
+ and alerts for all hosts returned by your search.
+
+
+ For more information about creating and viewing alerts, refer to .
+
+
+
+
+## Filter the Hosts view
+
+The **Hosts** page provides several mechanisms for filtering the data on the
+page:
+
+* Enter a search query using [((kib)) Query Language](((kibana-ref))/kuery-query.html) to show metrics that match your search criteria. For example,
+  to see metrics for hosts running on Linux, enter `host.os.type : "linux"` (more example queries are shown after this list).
+  Otherwise, you'll see metrics for all your monitored hosts (up to the number of
+  hosts specified by the host limit).
+
+* Select additional criteria to filter the view:
+ * In the **Operating System** list, select one or more operating systems
+ to include (or exclude) metrics for hosts running the selected operating systems.
+
+ * In the **Cloud Provider** list, select one or more cloud providers to
+ include (or exclude) metrics for hosts running on the selected cloud providers.
+
+ * In the **Service Name** list, select one or more service names to
+ include (or exclude) metrics for the hosts running the selected services.
+ Services must be instrumented by APM to be filterable.
+ This filter is useful for comparing different hosts to determine whether a problem lies
+ with a service or the host that it is running on.
+
+
+ Filtered results are sorted by _document count_.
+ Document count is the number of events received by Elastic for the hosts that match your filter criteria.
+
+
+* Change the date range in the time filter, or click and drag on a
+ visualization to change the date range.
+
+* Within a visualization, click a point on a line and apply filters to set other
+ visualizations on the page to the same time and/or host.
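+
+The following are a few example queries you could enter in the search field, shown here only to
+illustrate the syntax; the field values (operating systems, cloud providers, host names, and
+service names) are assumptions and depend on the data you ship:
+
+```
+host.os.type : "linux"
+host.os.type : "linux" and cloud.provider : "aws"
+host.name : web-* and not cloud.provider : "gcp"
+service.name : "frontend" or service.name : "checkout"
+```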
+
+
+
+## View metrics
+
+On the **Metrics** tab of the **Hosts** page, view metrics trending over time, including normalized load,
+CPU usage, memory usage, network inbound, network outbound, disk read IOPS, and
+disk write IOPS. Place your cursor over a line to view metrics at a specific
+point in time.
+
+To see metrics for a specific host, refer to .
+
+
+
+### Inspect and download metrics
+
+You can access a text-based view of the data underlying
+your metrics visualizations and optionally download the data to a
+comma-separated values (CSV) file.
+
+Hover your cursor over a visualization, then in the upper-right corner, click
+the ellipsis icon to inspect the data.
+
+![Screenshot showing option to inspect data](../images/hosts-inspect.png)
+
+In the flyout, click **Download CSV** to download formatted or raw data to a CSV
+file.
+
+Click **View: Data** and change the view to **Requests** to explore the request
+used to fetch the data and the response returned from ((es)). On the **Request** tab, click the links
+to inspect and analyze the request further in the Dev Console or Search Profiler.
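+
+For comparison, the request behind a metric chart is typically a date histogram combined with a
+metric aggregation. The following is a simplified sketch of that kind of query, not the exact
+request Lens generates; it assumes your host metrics are stored in `metrics-*` indices and use the
+System integration's `system.cpu.total.norm.pct` field, and the endpoint URL and API key are
+placeholders:
+
+```bash
+# Simplified, hypothetical sketch of an average CPU usage query over time.
+curl -X POST "${ES_URL}/metrics-*/_search" \
+  -H "Authorization: ApiKey ${API_KEY}" \
+  -H "Content-Type: application/json" \
+  -d '{
+    "size": 0,
+    "query": { "range": { "@timestamp": { "gte": "now-15m" } } },
+    "aggs": {
+      "cpu_over_time": {
+        "date_histogram": { "field": "@timestamp", "fixed_interval": "1m" },
+        "aggs": {
+          "cpu_usage": { "avg": { "field": "system.cpu.total.norm.pct" } }
+        }
+      }
+    }
+  }'
+```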
+
+
+
+### Open in Lens
+
+Metrics visualizations are powered by Lens, meaning you can continue your
+analysis in Lens if you require more flexibility. Hover your cursor over a
+visualization, then click the ellipsis icon in the upper-right corner to open
+the visualization in Lens.
+
+![Screenshot showing option to open in Lens](../images/hosts-open-in-lens.png)
+
+In Lens, you can examine all the fields and formulas used to create the
+visualization, make modifications to the visualization, and save your changes.
+
+For more information about using Lens, refer to the
+[((kib)) documentation about Lens](((kibana-ref))/lens.html).
+
+
+
+## View logs
+
+On the **Logs** tab of the **Hosts** page, view logs for the systems you are monitoring and search
+for specific log entries. This view shows logs for all of the hosts returned by
+the current query.
+
+![Screenshot showing Logs view](../images/hosts-logs.png)
+
+To see logs for a specific host, refer to .
+
+
+
+## View alerts
+
+On the **Alerts** tab of the **Hosts** page, view active alerts to pinpoint problems. Use this view
+to figure out which hosts triggered alerts and identify root causes. This view
+shows alerts for all of the hosts returned by the current query.
+
+From the **Actions** menu, you can choose to:
+
+* Add the alert to a new or existing case.
+* View rule details.
+* View alert details.
+
+![Screenshot showing Alerts view](../images/hosts-view-alerts.png)
+
+To see alerts for a specific host, refer to .
+
+
+
+ If your rules are triggering alerts that don't appear on the **Hosts** page,
+ edit the rules and make sure they are correctly configured to associate the host name with the alert:
+
+ * For Metric threshold or Custom threshold rules, select `host.name` in the **Group alerts by** field.
+ * For Inventory rules, select **Host** for the node type under **Conditions**.
+
+ To learn more about creating and managing rules, refer to .
+
+
+
+
+## View host details
+
+Without leaving the **Hosts** page, you can view enhanced metrics relating to
+each host running in your infrastructure. In the list of hosts, find the host
+you want to monitor, then click the **Toggle dialog with details**
+icon to display the host details overlay.
+
+
+To expand the overlay and view more detail, click **Open as page** in the upper-right corner.
+
+
+The host details overlay contains the following tabs:
+
+
+
+
+These metrics are also available when viewing hosts on the **Inventory**
+page.
+
+
+
+
+## Why am I seeing dashed lines in charts?
+
+There are a few reasons why you may see dashed lines in your charts.
+
+* The chart interval is too short
+* Data is missing
+* The chart interval is too short and data is missing
+
+
+
+### The chart interval is too short
+
+In this example, data is reported less frequently than the Lens chart interval.
+A dashed line connects the known data points to make it easier to visualize trends in the data.
+
+![Screenshot showing dashed chart](../images/hosts-dashed.png)
+
+The chart interval is automatically set depending on the selected time duration.
+To fix this problem, change the selected time range at the top of the page.
+
+
+Want to dig in further while maintaining the selected time duration?
+Hover over the chart you're interested in and select **Options** → **Open in Lens**.
+Once in Lens, you can adjust the chart interval temporarily.
+Note that this change is not persisted in the **Hosts** view.
+
+
+
+
+### Data is missing
+
+A solid line indicates that the chart interval is set appropriately for the rate at which data is reported.
+In this example, a solid line turns into a dashed line, indicating missing data.
+You may want to investigate this time period to determine whether there is an outage or issue.
+
+![Screenshot showing missing data](../images/hosts-missing-data.png)
+
+### The chart interval is too short and data is missing
+
+In the example shown in the screenshot,
+data is reported less frequently than the Lens chart interval **and** there is missing data.
+
+This missing data can be hard to spot at first glance.
+The green boxes outline regular data emissions, while the missing data is outlined in pink.
+Similar to the above scenario, you may want to investigate the time period with the missing data
+to determine if there is an outage or issue.
+
+![Screenshot showing dashed lines and missing data](../images/hosts-dashed-and-missing.png)
diff --git a/docs/en/serverless/infra-monitoring/aws-metrics.mdx b/docs/en/serverless/infra-monitoring/aws-metrics.mdx
new file mode 100644
index 0000000000..32829da971
--- /dev/null
+++ b/docs/en/serverless/infra-monitoring/aws-metrics.mdx
@@ -0,0 +1,84 @@
+---
+id: serverlessObservabilityAwsMetrics
+slug: /serverless/observability/aws-metrics
+title: AWS metrics
+description: Learn about key metrics used for AWS monitoring.
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+
+
+
+
+Using this module generates additional AWS charges for GetMetricData API requests.
+
+
+
+
+
+## Monitor EC2 instances
+
+To analyze EC2 instance metrics,
+you can select view filters based on the following predefined metrics,
+or you can add custom metrics.
+
+| Metric | Calculation |
+|---|---|
+| **CPU Usage** | Average of `aws.ec2.cpu.total.pct`. |
+| **Inbound Traffic** | Average of `aws.ec2.network.in.bytes_per_sec`. |
+| **Outbound Traffic** | Average of `aws.ec2.network.out.bytes_per_sec`. |
+| **Disk Reads (Bytes)** | Average of `aws.ec2.diskio.read.bytes_per_sec`. |
+| **Disk Writes (Bytes)** | Average of `aws.ec2.diskio.write.bytes_per_sec`. |
+
+
+
+## Monitor S3 buckets
+
+To analyze S3 bucket metrics,
+you can select view filters based on the following predefined metrics,
+or you can add custom metrics.
+
+| Metric | Calculation |
+|---|---|
+| **Bucket Size** | Average of `aws.s3_daily_storage.bucket.size.bytes`. |
+| **Total Requests** | Average of `aws.s3_request.requests.total`. |
+| **Number of Objects** | Average of `aws.s3_daily_storage.number_of_objects`. |
+| **Downloads (Bytes)** | Average of `aws.s3_request.downloaded.bytes`. |
+| **Uploads (Bytes)** | Average of `aws.s3_request.uploaded.bytes`. |
+
+
+
+## Monitor SQS queues
+
+To analyze SQS queue metrics,
+you can select view filters based on the following predefined metrics,
+or you can add custom metrics.
+
+| Metric | Calculation |
+|---|---|
+| **Messages Available** | Max of `aws.sqs.messages.visible`. |
+| **Messages Delayed** | Max of `aws.sqs.messages.delayed`. |
+| **Messages Added** | Max of `aws.sqs.messages.sent`. |
+| **Messages Returned Empty** | Max of `aws.sqs.messages.not_visible`. |
+| **Oldest Message** | Max of `aws.sqs.oldest_message_age.sec`. |
+
+
+
+## Monitor RDS databases
+
+To analyze RDS database metrics,
+you can select view filters based on the following predefined metrics,
+or you can add custom metrics.
+
+| Metric | Calculation |
+|---|---|
+| **CPU Usage** | Average of `aws.rds.cpu.total.pct`. |
+| **Connections** | Average of `aws.rds.database_connections`. |
+| **Queries Executed** | Average of `aws.rds.queries`. |
+| **Active Transactions** | Average of `aws.rds.transactions.active`. |
+| **Latency** | Average of `aws.rds.latency.dml`. |
+
+For information about the fields used by the Infrastructure UI to display AWS services metrics, see the
+.
\ No newline at end of file
diff --git a/docs/en/serverless/infra-monitoring/configure-infra-settings.mdx b/docs/en/serverless/infra-monitoring/configure-infra-settings.mdx
new file mode 100644
index 0000000000..aabd6c3a83
--- /dev/null
+++ b/docs/en/serverless/infra-monitoring/configure-infra-settings.mdx
@@ -0,0 +1,41 @@
+---
+id: serverlessObservabilityConfigureInfraSettings
+slug: /serverless/observability/configure-intra-settings
+title: Configure settings
+description: Learn how to configure infrastructure UI settings.
+tags: [ 'serverless', 'observability', 'how to' ]
+---
+
+
+
+import Roles from '../partials/roles.mdx'
+
+
+
+
+
+From the main ((observability)) menu, go to **Infrastructure** → **Inventory** or **Hosts**,
+and click the **Settings** link at the top of the page.
+The following settings are available:
+
+
+
+ **Name**
+ Name of the source configuration.
+
+
+ **Indices**
+ ((ipm-cap)) or patterns used to match ((es)) indices that contain metrics. The default patterns are `metrics-*,metricbeat-*`.
+
+
+ **Machine Learning**
+ The minimum severity score required to display anomalies in the Infrastructure UI. The default is 50.
+
+
+ **Features**
+ Turn new features on and off.
+
+
+Click **Apply** to save your changes.
+
+If the fields are grayed out and cannot be edited, you may not have sufficient privileges to change the source configuration.
diff --git a/docs/en/serverless/infra-monitoring/detect-metric-anomalies.mdx b/docs/en/serverless/infra-monitoring/detect-metric-anomalies.mdx
new file mode 100644
index 0000000000..21c76421d6
--- /dev/null
+++ b/docs/en/serverless/infra-monitoring/detect-metric-anomalies.mdx
@@ -0,0 +1,78 @@
+---
+id: serverlessObservabilityDetectMetricAnomalies
+slug: /serverless/observability/detect-metric-anomalies
+title: Detect metric anomalies
+description: Detect and inspect memory usage and network traffic anomalies for hosts and Kubernetes pods.
+tags: [ 'serverless', 'observability', 'how to' ]
+---
+
+
+
+import Roles from '../partials/roles.mdx'
+
+
+
+
+
+You can create ((ml)) jobs to detect and inspect memory usage and network traffic anomalies for hosts and Kubernetes pods.
+
+You can model system memory usage, along with inbound and outbound network traffic across hosts or pods.
+You can detect unusual increases in memory usage and unusually high inbound or outbound traffic across hosts or pods.
+
+
+
+## Enable ((ml)) jobs for hosts or Kubernetes pods
+
+Create a ((ml)) job to detect anomalous memory usage and network traffic automatically.
+
+After creating ((ml)) jobs, you cannot change their settings.
+You can recreate the jobs later,
+but doing so removes any previously detected anomalies.
+
+{/* lint ignore anomaly-detection observability */}
+1. In your ((observability)) project, go to **Infrastructure** → **Inventory**
+and click the **Anomaly detection** link at the top of the page.
+1. Under **Hosts** or **Kubernetes Pods**, click **Enable** to create a ((ml)) job.
+1. Choose a start date for the ((ml)) analysis. ((ml-cap)) jobs analyze the last four weeks of data and continue to run indefinitely.
+1. Select a partition field.
+ Partitions allow you to create independent models for different groups of data that share similar behavior.
+ For example, you may want to build separate models for machine type or cloud availability zone so that anomalies are not weighted equally across groups.
+1. By default, ((ml)) jobs analyze all of your metric data.
+ You can filter this list to view only the jobs or metrics that you are interested in.
+ For example, you can filter by job name and node name to view specific ((anomaly-detect)) jobs for that host.
+1. Click **Enable jobs**.
+1. You're now ready to explore your metric anomalies. Click **Anomalies**.
+
+![Infrastructure ((ml-app)) anomalies](../images/metrics-ml-jobs.png)
+
+The **Anomalies** table displays a list of each single metric ((anomaly-detect)) job for the specific host or Kubernetes pod.
+By default, anomaly jobs are sorted by time to show the most recent job.
+
+Along with each anomaly job and the node name,
+detected anomalies with a severity score equal to 50 or higher are listed.
+These scores represent a severity of "warning" or higher in the selected time period.
+The **summary** value represents the increase between the actual value and the expected ("typical") value of the metric in the anomaly record result.
+
+To drill down and analyze the metric anomaly,
+select **Actions → Open in Anomaly Explorer** to view the Anomaly Explorer.
+You can also select **Actions** → **Show in Inventory** to view the host or Kubernetes pods Inventory page,
+filtered by the specific metric.
+
+
+
+These predefined ((anomaly-jobs)) use [custom rules](((ml-docs))/ml-rules.html).
+To update the rules in the Anomaly Explorer, select **Actions** → **Configure rules**.
+The changes only take effect for new results.
+If you want to apply the changes to existing results, clone and rerun the job.
+
+
+
+
+
+## History chart
+
+On the **Inventory** page, click **Show history** to view the metric values within the selected time frame.
+Detected anomalies with an anomaly score equal to 50 or higher are highlighted in red.
+To examine the detected anomalies, use the Anomaly Explorer.
+
+![History](../images/metrics-history-chart.png)
diff --git a/docs/en/serverless/infra-monitoring/docker-container-metrics.mdx b/docs/en/serverless/infra-monitoring/docker-container-metrics.mdx
new file mode 100644
index 0000000000..70c30370da
--- /dev/null
+++ b/docs/en/serverless/infra-monitoring/docker-container-metrics.mdx
@@ -0,0 +1,25 @@
+---
+id: serverlessObservabilityDockerContainerMetrics
+slug: /serverless/observability/docker-container-metrics
+title: Docker container metrics
+description: Learn about key metrics used for Docker container monitoring.
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+
+
+To analyze Docker container metrics,
+you can select view filters based on the following predefined metrics,
+or you can add custom metrics.
+
+| | |
+|---|---|
+| **CPU Usage** | Average of `docker.cpu.total.pct`. |
+| **Memory Usage** | Average of `docker.memory.usage.pct`. |
+| **Inbound Traffic** | Derivative of the maximum of `docker.network.in.bytes` scaled to a 1 second rate. |
+| **Outbound Traffic** | Derivative of the maximum of `docker.network.out.bytes` scaled to a 1 second rate. |
+
+For information about the fields used by the Infrastructure UI to display Docker container metrics, see
+Required fields.
\ No newline at end of file
diff --git a/docs/en/serverless/infra-monitoring/get-started-with-metrics.mdx b/docs/en/serverless/infra-monitoring/get-started-with-metrics.mdx
new file mode 100644
index 0000000000..009761eb48
--- /dev/null
+++ b/docs/en/serverless/infra-monitoring/get-started-with-metrics.mdx
@@ -0,0 +1,58 @@
+---
+id: serverlessObservabilityGetStartedWithMetrics
+slug: /serverless/observability/get-started-with-metrics
+title: Get started with system metrics
+description: Learn how to onboard your system metrics data quickly.
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+import Roles from '../partials/roles.mdx'
+
+
+
+In this guide you'll learn how to onboard system metrics data from a machine or server,
+then observe the data in Elastic ((observability)).
+
+To onboard system metrics data:
+
+1. Create a new ((observability)) project, or open an existing one.
+1. In your ((observability)) project, go to **Project Settings** → **Integrations**.
+1. Type **System** in the search bar, then select the integration to see more details about it.
+1. Click **Add System**.
+1. Follow the in-product steps to install the System integration and deploy an ((agent)).
+The sequence of steps varies depending on whether you have already installed an integration.
+
+ * When configuring the System integration, make sure that **Collect metrics from System instances** is turned on.
+ * Expand each configuration section to verify that the settings are correct for your host.
+ For example, you may want to turn on **System core metrics** to get a complete view of your infrastructure.
+
+Notice that you can also configure the integration to collect logs.
+
+
+ Do not try to deploy a second ((agent)) to the same system.
+ You have a couple of options:
+
+ * **Use the System integration to collect system logs and metrics.** To do this,
+ uninstall the standalone agent you deployed previously,
+ then follow the in-product steps to install the System integration and deploy an ((agent)).
+ * **Configure your existing standalone agent to collect metrics.** To do this,
+ edit the deployed ((agent))'s YAML file and add metric inputs to the configuration manually.
+ Manual configuration is a time-consuming process.
+ To save time, you can follow the in-product steps that describe how to deploy a standalone ((agent)),
+ and use the generated configuration as a source for the input configurations that you need to add to your standalone config file.
+
+
+After the agent is installed and successfully streaming metrics data,
+go to **Infrastructure** → **Inventory** or **Hosts** to see a metrics-driven view of your infrastructure.
+To learn more, refer to or .
+
+## Next steps
+
+Now that you've added metrics and explored your data,
+learn how to onboard other types of data:
+
+*
+*
+*
diff --git a/docs/en/serverless/infra-monitoring/host-metrics.mdx b/docs/en/serverless/infra-monitoring/host-metrics.mdx
new file mode 100644
index 0000000000..362446787e
--- /dev/null
+++ b/docs/en/serverless/infra-monitoring/host-metrics.mdx
@@ -0,0 +1,194 @@
+---
+id: serverlessObservabilityHostMetrics
+slug: /serverless/observability/host-metrics
+title: Host metrics
+description: Learn about key host metrics used for host monitoring.
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+
+
+Learn about key host metrics displayed in the Infrastructure UI:
+
+* Hosts
+* CPU usage
+* Memory
+* Log
+* Network
+* Disk
+
+
+
+## Hosts metrics
+
+| Metric | Description |
+|---|---|
+| **Hosts** | Number of hosts returned by your search criteria. |
+
+
+
+## CPU usage metrics
+
+
+
+ **CPU Usage (%)**
+
+ Percentage of CPU time spent in states other than Idle and IOWait, normalized by the number of CPU cores. This includes time spent in both user space and kernel space.
+
+ 100% means all CPUs of the host are busy.
+
+
+
+ **CPU Usage - iowait (%)**
+ The percentage of CPU time spent in wait (on disk).
+
+
+ **CPU Usage - irq (%)**
+ The percentage of CPU time spent servicing and handling hardware interrupts.
+
+
+ **CPU Usage - nice (%)**
+ The percentage of CPU time spent on low-priority processes.
+
+
+ **CPU Usage - softirq (%)**
+ The percentage of CPU time spent servicing and handling software interrupts.
+
+
+ **CPU Usage - steal (%)**
+ The percentage of CPU time spent in involuntary wait by the virtual CPU while the hypervisor was servicing another processor. Available only on Unix.
+
+
+ **CPU Usage - system (%)**
+ The percentage of CPU time spent in kernel space.
+
+
+ **CPU Usage - user (%)**
+ The percentage of CPU time spent in user space. On multi-core systems, percentages can be greater than 100%. For example, if 3 cores are at 60% usage, `system.cpu.user.pct` is reported as 180%.
+
+
+ **Load (1m)**
+
+ 1 minute load average.
+
+ Load average gives an indication of the number of threads that are runnable (either busy running on CPU, waiting to run, or waiting for a blocking IO operation to complete).
+
+
+
+ **Load (5m)**
+
+ 5 minute load average.
+
+ Load average gives an indication of the number of threads that are runnable (either busy running on CPU, waiting to run, or waiting for a blocking IO operation to complete).
+
+
+
+ **Load (15m)**
+
+ 15 minute load average.
+
+ Load average gives an indication of the number of threads that are runnable (either busy running on CPU, waiting to run, or waiting for a blocking IO operation to complete).
+
+
+
+ **Normalized Load**
+
+ 1 minute load average normalized by the number of CPU cores.
+
+ Load average gives an indication of the number of threads that are runnable (either busy running on CPU, waiting to run, or waiting for a blocking IO operation to complete).
+
+ 100% means the 1 minute load average is equal to the number of CPU cores of the host.
+
+ For example, on a host with 32 CPU cores, if the 1 minute load average is 32, the value reported here is 100%. If the 1 minute load average is 48, the value reported here is 150%.
+
+
+
+
+
+
+## Memory metrics
+
+
+
+ **Memory Cache**
+ Memory (page) cache.
+
+
+ **Memory Free**
+ Total available memory.
+
+
+ **Memory Free (excluding cache)**
+ Total available memory excluding the page cache.
+
+
+ **Memory Total**
+ Total memory capacity.
+
+
+ **Memory Usage (%)**
+
+ Percentage of main memory usage excluding page cache.
+
+ This includes resident memory for all processes plus memory used by the kernel structures and code apart from the page cache.
+
+ A high level indicates a situation of memory saturation for the host. For example, 100% means the main memory is entirely filled with memory that can't be reclaimed, except by swapping out.
+
+
+
+ **Memory Used**
+ Main memory usage excluding page cache.
+
+
+
+
+
+## Log metrics
+
+| Metric | Description |
+|---|---|
+| **Log Rate** | Derivative of the cumulative sum of the document count scaled to a 1 second rate. This metric relies on the same indices as the logs. |
+
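+To see how such a rate can be computed, here is a minimal sketch of an equivalent query you could run from **Developer Tools**. The `logs-*` index pattern and the one-minute buckets are assumptions for illustration, not the exact query the UI issues.
+
+```console
+POST logs-*/_search?size=0
+{
+  "aggs": {
+    "per_minute": {
+      "date_histogram": { "field": "@timestamp", "fixed_interval": "1m" },
+      "aggs": {
+        "cumulative_docs": { "cumulative_sum": { "buckets_path": "_count" } },
+        "log_rate": { "derivative": { "buckets_path": "cumulative_docs", "unit": "1s" } }
+      }
+    }
+  }
+}
+```
+
+The per-second rate for each bucket is returned in the derivative's `normalized_value`.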
+
+
+## Network metrics
+
+| Metric | Description |
+|---|---|
+| **Network Inbound (RX)** | Number of bytes that have been received per second on the public interfaces of the hosts. |
+| **Network Outbound (TX)** | Number of bytes that have been sent per second on the public interfaces of the hosts. |
+
+
+
+## Disk metrics
+
+| Metric | Description |
+|---|---|
+| **Disk Latency** | Time spent to service disk requests. |
+| **Disk Read IOPS** | Average count of read operations from the device per second. |
+| **Disk Read Throughput** | Average number of bytes read from the device per second. |
+| **Disk Usage - Available (%)** | Percentage of disk space available. |
+| **Disk Usage - Max (%)** | Percentage of disk space used. A high percentage indicates that a partition on a disk is running out of space. |
+| **Disk Write IOPS** | Average count of write operations from the device per second. |
+| **Disk Write Throughput** | Average number of bytes written from the device per second. |
+
diff --git a/docs/en/serverless/infra-monitoring/infra-monitoring.mdx b/docs/en/serverless/infra-monitoring/infra-monitoring.mdx
new file mode 100644
index 0000000000..69542ac125
--- /dev/null
+++ b/docs/en/serverless/infra-monitoring/infra-monitoring.mdx
@@ -0,0 +1,36 @@
+---
+id: serverlessObservabilityInfrastructureMonitoring
+slug: /serverless/observability/infrastructure-monitoring
+title: Infrastructure monitoring
+description: Monitor metrics from your servers, Docker, Kubernetes, Prometheus, and other services and applications.
+tags: [ 'serverless', 'observability', 'overview' ]
+---
+
+
+
+
+
+Elastic ((observability)) allows you to visualize infrastructure metrics to help diagnose problematic spikes,
+identify high resource utilization, automatically discover and track pods,
+and unify your metrics with logs and APM data.
+
+Using ((agent)) integrations, you can ingest and analyze metrics from servers,
+Docker containers, Kubernetes orchestrations, explore and analyze application
+telemetry, and more.
+
+For more information, refer to the following links:
+
+* :
+Learn how to onboard your system metrics data quickly.
+* :
+Use the **Inventory** page to get a metrics-driven view of your infrastructure grouped by resource type.
+* :
+Use the **Hosts** page to get a metrics-driven view of your infrastructure backed by an easy-to-use interface called Lens.
+* : Detect and inspect memory usage and network traffic anomalies for hosts and Kubernetes pods.
+* : Learn how to configure infrastructure UI settings.
+* : Learn about key metrics used for infrastructure monitoring.
+* : Learn about the fields required to display data in the Infrastructure UI.
+
+By default, the Infrastructure UI displays metrics from ((es)) indices that
+match the `metrics-*` and `metricbeat-*` index patterns. To learn how to change
+this behavior, refer to Configure settings.
diff --git a/docs/en/serverless/infra-monitoring/kubernetes-pod-metrics.mdx b/docs/en/serverless/infra-monitoring/kubernetes-pod-metrics.mdx
new file mode 100644
index 0000000000..6c3925f840
--- /dev/null
+++ b/docs/en/serverless/infra-monitoring/kubernetes-pod-metrics.mdx
@@ -0,0 +1,26 @@
+---
+id: serverlessObservabilityKubernetesPodMetrics
+slug: /serverless/observability/kubernetes-pod-metrics
+title: Kubernetes pod metrics
+description: Learn about key metrics used for Kubernetes monitoring.
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+
+
+To analyze Kubernetes pod metrics,
+you can select view filters based on the following predefined metrics,
+or you can add custom metrics.
+
+
+| | |
+|---|---|
+| **CPU Usage** | Average of `kubernetes.pod.cpu.usage.node.pct`. |
+| **Memory Usage** | Average of `kubernetes.pod.memory.usage.node.pct`. |
+| **Inbound Traffic** | Derivative of the maximum of `kubernetes.pod.network.rx.bytes` scaled to a 1 second rate. |
+| **Outbound Traffic** | Derivative of the maximum of `kubernetes.pod.network.tx.bytes` scaled to a 1 second rate. |
+
+For information about the fields used by the Infrastructure UI to display Kubernetes pod metrics, see
+Required fields.
diff --git a/docs/en/serverless/infra-monitoring/metrics-app-fields.mdx b/docs/en/serverless/infra-monitoring/metrics-app-fields.mdx
new file mode 100644
index 0000000000..e7845bc1b7
--- /dev/null
+++ b/docs/en/serverless/infra-monitoring/metrics-app-fields.mdx
@@ -0,0 +1,452 @@
+---
+id: serverlessObservabilityMetricsFields
+slug: /serverless/observability/infrastructure-monitoring-required-fields
+title: Required fields
+description: Learn about the fields required to display data in the Infrastructure UI.
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+This section lists the fields the Infrastructure UI uses to display data.
+Please note that some of the fields listed here are not [ECS fields](((ecs-ref))/ecs-reference.html#_what_is_ecs).
+
+## Additional field details
+
+The `event.dataset` field is required to display data properly in some views. This field
+is a combination of `metricset.module`, which is the ((metricbeat)) module name, and `metricset.name`,
+which is the metricset name.
+
+To determine each metric's optimal time interval, all charts use `metricset.period`.
+If `metricset.period` is not available, then it falls back to 1 minute intervals.
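+
+For example, a CPU metric collected by the System integration might carry fields like the following (an illustrative document shape, not a complete event):
+
+```json
+{
+  "@timestamp": "2023-09-15T08:15:20.234Z",
+  "event": { "dataset": "system.cpu" },
+  "metricset": { "module": "system", "name": "cpu", "period": 10000 }
+}
+```
+
+Here `event.dataset` is the combination of `metricset.module` and `metricset.name` (`system` + `cpu`), and `metricset.period` is the collection interval in milliseconds.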
+
+
+
+## Base fields
+
+The `base` field set contains all fields which are on the top level. These fields are common across all types of events.
+
+
+
+
+ `@timestamp`
+
+ Date/time when the event originated.
+
+ This is the date/time extracted from the event, typically representing when the source generated the event.
+ If the event source has no original timestamp, this value is typically populated by the first time the pipeline received the event.
+ Required field for all events.
+
+ Example: `May 27, 2020 @ 15:22:27.982`
+
+ date
+
+
+
+ `message`
+
+ For log events the message field contains the log message, optimized for viewing in a log viewer.
+
+ For structured logs without an original message field, other fields can be concatenated to form a human-readable summary of the event.
+
+ If multiple messages exist, they can be combined into one message.
+
+ Example: `Hello World`
+
+ text
+
+
+
+
+
+## Hosts fields
+
+These fields must be mapped to display host data in the ((infrastructure-app)).
+
+
+
+
+ `host.name`
+
+ Name of the host.
+
+ It can contain what `hostname` returns on Unix systems, the fully qualified domain name, or a name specified by the user. The sender decides which value to use.
+
+ Example: `MacBook-Elastic.local`
+
+ keyword
+
+
+ `host.ip`
+
+ IP of the host that records the event.
+
+ ip
+
+
+
+
+
+## Docker container fields
+
+These fields must be mapped to display Docker container data in the ((infrastructure-app)).
+
+
+
+ `container.id`
+
+ Unique container id.
+
+ Example: `data`
+
+ keyword
+
+
+
+ `container.name`
+
+ Container name.
+
+ keyword
+
+
+
+ `container.ip_address`
+
+ IP of the container.
+
+ *Not an ECS field*
+
+ ip
+
+
+
+
+
+
+## Kubernetes pod fields
+
+These fields must be mapped to display Kubernetes pod data in the ((infrastructure-app)).
+
+
+
+
+ `kubernetes.pod.uid`
+
+ Kubernetes Pod UID.
+
+ Example: `8454328b-673d-11ea-7d80-21010a840123`
+
+ *Not an ECS field*
+
+ keyword
+
+
+
+ `kubernetes.pod.name`
+
+ Kubernetes pod name.
+
+ Example: `nginx-demo`
+
+ *Not an ECS field*
+
+ keyword
+
+
+
+ `kubernetes.pod.ip`
+
+ IP of the Kubernetes pod.
+
+ *Not an ECS field*
+
+ keyword
+
+
+
+
+
+## AWS EC2 instance fields
+
+These fields must be mapped to display EC2 instance data in the ((infrastructure-app)).
+
+
+
+
+ `cloud.instance.id`
+
+ Instance ID of the host machine.
+
+ Example: `i-1234567890abcdef0`
+
+ keyword
+
+
+
+ `cloud.instance.name`
+
+ Instance name of the host machine.
+
+ keyword
+
+
+
+ `aws.ec2.instance.public.ip`
+
+ Instance public IP of the host machine.
+
+ *Not an ECS field*
+
+ keyword
+
+
+
+
+
+## AWS S3 bucket fields
+
+These fields must be mapped to display S3 bucket data in the ((infrastructure-app)).
+
+
+
+ `aws.s3.bucket.name`
+
+ The name or ID of the AWS S3 bucket.
+
+ *Not an ECS field*
+
+ keyword
+
+
+
+
+
+## AWS SQS queue fields
+
+These fields must be mapped to display SQS queue data in the ((infrastructure-app)).
+
+
+
+
+ `aws.sqs.queue.name`
+
+ The name or ID of the AWS SQS queue.
+
+ *Not an ECS field*
+
+ keyword
+
+
+
+
+
+## AWS RDS database fields
+
+These fields must be mapped to display RDS database data in the ((infrastructure-app)).
+
+
+
+
+ `aws.rds.db_instance.arn`
+
+ Amazon Resource Name (ARN) for each RDS.
+
+ *Not an ECS field*
+
+ keyword
+
+
+
+ `aws.rds.db_instance.identifier`
+
+ Contains a user-supplied database identifier. This identifier is the unique key that identifies a DB instance.
+
+ *Not an ECS field*
+
+ keyword
+
+
+
+
+
+## Additional grouping fields
+
+Depending on which entity you select in the **Inventory** view, these additional fields can be mapped to group entities by.
+
+
+
+
+ `cloud.availability_zone`
+
+ Availability zone in which this host is running.
+
+ Example: `us-east-1c`
+
+ keyword
+
+
+
+ `cloud.machine.type`
+
+ Machine type of the host machine.
+
+ Example: `t2.medium`
+
+ keyword
+
+
+
+ `cloud.region`
+
+ Region in which this host is running.
+
+ Example: `us-east-1`
+
+ keyword
+
+
+
+ `cloud.instance.id`
+
+ Instance ID of the host machine.
+
+ Example: `i-1234567890abcdef0`
+
+ keyword
+
+
+
+ `cloud.provider`
+
+ Name of the cloud provider. Example values are `aws`, `azure`, `gcp`, or `digitalocean`.
+
+ Example: `aws`
+
+ keyword
+
+
+
+ `cloud.instance.name`
+
+ Instance name of the host machine.
+
+ keyword
+
+
+
+ `cloud.project.id`
+
+ Name of the project in Google Cloud.
+
+ *Not an ECS field*
+
+ keyword
+
+
+
+ `service.type`
+
+ The type of service the data is collected from.
+
+ The type can be used to group and correlate logs and metrics from one service type.
+
+ For example, the service type for metrics collected from ((es)) is `elasticsearch`.
+
+ Example: `elasticsearch`
+
+ *Not an ECS field*
+
+ keyword
+
+
+
+ `host.hostname`
+
+ Name of the host. This field is required if you want to use ((ml-features)).
+
+ It normally contains what the `hostname` command returns on the host machine.
+
+ Example: `Elastic.local`
+
+ keyword
+
+
+
+ `host.os.name`
+
+ Operating system name, without the version.
+
+ Multi-fields:
+
+ os.name.text (type: text)
+
+ Example: `Mac OS X`
+
+ keyword
+
+
+
+ `host.os.kernel`
+
+ Operating system kernel version as a raw string.
+
+ Example: `4.4.0-112-generic`
+
+ keyword
+
+
diff --git a/docs/en/serverless/infra-monitoring/metrics-reference.mdx b/docs/en/serverless/infra-monitoring/metrics-reference.mdx
new file mode 100644
index 0000000000..f0f4e4b12d
--- /dev/null
+++ b/docs/en/serverless/infra-monitoring/metrics-reference.mdx
@@ -0,0 +1,19 @@
+---
+id: serverlessObservabilityMetricsReference
+slug: /serverless/observability/metrics-reference
+title: Metrics reference
+description: Learn about key metrics used for infrastructure monitoring.
+tags: [ 'serverless', 'observability', 'reference' ]
+---
+
+
+
+
+
+Learn about the key metrics displayed in the Infrastructure UI and how they
+are calculated.
+
+* Host metrics
+* Kubernetes pod metrics
+* Docker container metrics
+* AWS metrics
\ No newline at end of file
diff --git a/docs/en/serverless/infra-monitoring/view-infrastructure-metrics.mdx b/docs/en/serverless/infra-monitoring/view-infrastructure-metrics.mdx
new file mode 100644
index 0000000000..70141b27c9
--- /dev/null
+++ b/docs/en/serverless/infra-monitoring/view-infrastructure-metrics.mdx
@@ -0,0 +1,117 @@
+---
+id: serverlessObservabilityViewInfrastructureMetrics
+slug: /serverless/observability/view-infrastructure-metrics
+title: View infrastructure metrics by resource type
+description: Get a metrics-driven view of your infrastructure grouped by resource type.
+tags: [ 'serverless', 'observability', 'how to' ]
+---
+
+
+
+import HostDetails from '../transclusion/host-details.mdx'
+
+
+
+The **Inventory** page provides a metrics-driven view of your entire infrastructure grouped by
+the resources you are monitoring. All monitored resources emitting
+a core set of infrastructure metrics are displayed to give you a quick view of the overall health
+of your infrastructure.
+
+To access the **Inventory** page, in your ((observability)) project,
+go to **Infrastructure** → **Inventory**.
+
+![Infrastructure UI in ((kib))](../images/metrics-app.png)
+
+To learn more about the metrics shown on this page, refer to the Metrics reference.
+
+
+
+If you haven't added data yet, click **Add data** to search for and install an Elastic integration.
+
+Need help getting started? Follow the steps in
+Get started with system metrics.
+
+
+
+
+
+## Filter the Inventory view
+
+To get started with your analysis, select the type of resources you want to show
+in the high-level view. From the **Show** menu, select one of the following:
+
+* **Hosts** (the default)
+* **Kubernetes Pods**
+* **Docker Containers**
+* **AWS**, which includes EC2 instances, S3 buckets, RDS databases, and SQS queues
+
+When you hover over each resource in the waffle map, the metrics specific to
+that resource are displayed.
+
+You can sort by resource, group the resource by specific fields related to it, and sort by
+either name or metric value. For example, you can filter the view to display the memory usage
+of your Kubernetes pods, grouped by namespace, and sorted by the memory usage value.
+
+![Kubernetes pod filtering](../images/kubernetes-filter.png)
+
+You can also use the search bar to create structured queries using [((kib)) Query Language](((kibana-ref))/kuery-query.html).
+For example, enter `host.hostname : "host1"` to view only the information for `host1`.
+
+To examine the metrics for a specific time, use the time filter to select the date and time.
+
+
+
+## View host metrics
+
+By default, the **Inventory** page displays a waffle map that shows the hosts you
+are monitoring and the current CPU usage for each host.
+Alternatively, you can click the **Table view** icon
+to switch to a table view.
+
+Without leaving the **Inventory** page, you can view enhanced metrics relating to each host
+running in your infrastructure. On the waffle map, select a host to display the host details
+overlay.
+
+
+To expand the overlay and view more detail, click **Open as page** in the upper-right corner.
+
+
+The host details overlay contains the following tabs:
+
+
+
+
+These metrics are also available when viewing hosts on the **Hosts**
+page.
+
+
+
+
+## View metrics for other resources
+
+When you have searched and filtered for a specific resource, you can drill down to analyze the
+metrics relating to it. For example, when viewing Kubernetes Pods in the high-level view,
+click the Pod you want to analyze and select **Kubernetes Pod metrics** to see detailed metrics:
+
+![Kubernetes pod metrics](../images/pod-metrics.png)
+
+
+
+## Add custom metrics
+
+If the predefined metrics displayed on the Inventory page for each resource are not
+sufficient for your specific use case, you can add and define custom metrics.
+
+Select your resource, and from the **Metric** filter menu, click **Add metric**.
+
+![Add custom metrics](../images/add-custom-metric.png)
+
+
+
+## Integrate with Logs and APM
+
+Depending on the features you have installed and configured, you can view logs or traces relating to a specific resource.
+For example, in the high-level view, when you click a Kubernetes Pod resource, you can choose:
+
+* **Kubernetes Pod logs** to view the corresponding logs in the ((logs-app)).
+* **Kubernetes Pod APM traces** to view the corresponding traces in the ((apm-app)).
diff --git a/docs/en/serverless/logging/correlate-application-logs.mdx b/docs/en/serverless/logging/correlate-application-logs.mdx
new file mode 100644
index 0000000000..ccbb21365e
--- /dev/null
+++ b/docs/en/serverless/logging/correlate-application-logs.mdx
@@ -0,0 +1,90 @@
+---
+id: serverlessObservabilityCorrelateApplicationLogs
+slug: /serverless/observability/correlate-application-logs
+title: Stream application logs
+description: Learn about application logs and options for ingesting them.
+tags: [ 'serverless', 'observability', 'overview' ]
+---
+
+
+
+import CorrelateLogs from '../transclusion/observability/application-logs/correlate-logs.mdx'
+
+Application logs provide valuable insight into events that have occurred within your services and applications.
+
+The format of your logs (structured or plaintext) influences your log ingestion strategy.
+
+## Plaintext logs vs. structured Elastic Common Schema (ECS) logs
+
+Logs are typically produced as either plaintext or structured.
+Plaintext logs contain only text and have no special formatting, for example:
+
+```log
+2019-08-06T12:09:12.375Z INFO:spring-petclinic: Tomcat started on port(s): 8080 (http) with context path, org.springframework.boot.web.embedded.tomcat.TomcatWebServer
+2019-08-06T12:09:12.379Z INFO:spring-petclinic: Started PetClinicApplication in 7.095 seconds (JVM running for 9.082), org.springframework.samples.petclinic.PetClinicApplication
+2019-08-06T14:08:40.199Z DEBUG:spring-petclinic: init find form, org.springframework.samples.petclinic.owner.OwnerController
+```
+
+Structured logs follow a predefined, repeatable pattern or structure.
+This structure is applied at write time — preventing the need for parsing at ingest time.
+The Elastic Common Schema (ECS) defines a common set of fields to use when structuring logs.
+This structure allows logs to be easily ingested,
+and provides the ability to correlate, search, and aggregate on individual fields within your logs.
+
+For example, the previous example logs might look like this when structured with ECS-compatible JSON:
+
+```json
+{"@timestamp":"2019-08-06T12:09:12.375Z", "log.level": "INFO", "message":"Tomcat started on port(s): 8080 (http) with context path ''", "service.name":"spring-petclinic","process.thread.name":"restartedMain","log.logger":"org.springframework.boot.web.embedded.tomcat.TomcatWebServer"}
+{"@timestamp":"2019-08-06T12:09:12.379Z", "log.level": "INFO", "message":"Started PetClinicApplication in 7.095 seconds (JVM running for 9.082)", "service.name":"spring-petclinic","process.thread.name":"restartedMain","log.logger":"org.springframework.samples.petclinic.PetClinicApplication"}
+{"@timestamp":"2019-08-06T14:08:40.199Z", "log.level":"DEBUG", "message":"init find form", "service.name":"spring-petclinic","process.thread.name":"http-nio-8080-exec-8","log.logger":"org.springframework.samples.petclinic.owner.OwnerController","transaction.id":"28b7fb8d5aba51f1","trace.id":"2869b25b5469590610fea49ac04af7da"}
+```
+
+## Ingesting logs
+
+There are several ways to ingest application logs into your project.
+Your specific situation helps determine the method that's right for you.
+
+### Plaintext logs
+
+With ((filebeat)) or ((agent)), you can ingest plaintext logs, including existing logs, from any programming language or framework without modifying your application or its configuration.
+
+For plaintext logs to be useful, you need to use ((filebeat)) or ((agent)) to parse the log data.
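+
+As an illustration, a ((filebeat)) `dissect` processor can split lines like the Spring Petclinic examples above into separate fields. The tokenizer pattern and field names below are assumptions based on that example format, not a ready-made configuration:
+
+```yaml
+processors:
+  - dissect:
+      # Split "2019-08-06T12:09:12.375Z INFO:spring-petclinic: Tomcat started ..." into parts.
+      tokenizer: "%{ts} %{log_level}:%{service}: %{msg}"
+      field: "message"
+      # Write the extracted keys under a "petclinic" object instead of the default "dissect" prefix.
+      target_prefix: "petclinic"
+```
+
+You would typically still map or rename the extracted keys to the fields you want to search on.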
+
+** Learn more in Plaintext logs**
+
+### ECS formatted logs
+
+Logs formatted in ECS don't require manual parsing and the configuration can be reused across applications. They also include log correlation. You can format your logs in ECS by using ECS logging plugins or ((apm-agent)) ECS reformatting.
+
+#### ECS logging plugins
+
+Add ECS logging plugins to your logging libraries to format your logs into ECS-compatible JSON that doesn't require parsing.
+
+To use ECS logging, you need to modify your application and its log configuration.
+
+** Learn more in ECS formatted logs**
+
+#### ((apm-agent)) log reformatting
+
+Some Elastic ((apm-agent))s can automatically reformat application logs to ECS format
+without adding an ECS logger dependency or modifying the application.
+
+This feature is supported for the following ((apm-agent))s:
+
+* [Ruby](((apm-ruby-ref))/log-reformat.html)
+* [Python](((apm-py-ref))/logs.html#log-reformatting)
+* [Java](((apm-java-ref))/logs.html#log-reformatting)
+
+** Learn more in ECS formatted logs**
+
+### ((apm-agent)) log sending
+
+Automatically capture and send logs directly to the managed intake service using the ((apm-agent)) without using ((filebeat)) or ((agent)).
+
+Log sending is supported in the Java ((apm-agent)).
+
+** Learn more in ((apm-agent)) log sending**
+
+## Log correlation
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/logging/ecs-application-logs.mdx b/docs/en/serverless/logging/ecs-application-logs.mdx
new file mode 100644
index 0000000000..17776d3efc
--- /dev/null
+++ b/docs/en/serverless/logging/ecs-application-logs.mdx
@@ -0,0 +1,186 @@
+---
+id: serverlessObservabilityECSApplicationLogs
+slug: /serverless/observability/ecs-application-logs
+title: ECS formatted application logs
+description: Use an ECS logger or an ((apm-agent)) to format your logs in ECS format.
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+import InstallWidget from '../transclusion/observability/tab-widgets/filebeat-install/widget.mdx'
+import SetupWidget from '../transclusion/observability/tab-widgets/filebeat-setup/widget.mdx'
+import StartWidget from '../transclusion/observability/tab-widgets/filebeat-start/widget.mdx'
+import ConfigureFilebeat from '../transclusion/observability/tab-widgets/filebeat-logs/widget.mdx'
+
+
+
+Logs formatted in Elastic Common Schema (ECS) don't require manual parsing, and the configuration can be reused across applications. ECS-formatted logs, when paired with an ((apm-agent)), allow you to correlate logs so you can easily view the logs that belong to a particular trace.
+
+You can format your logs in ECS format the following ways:
+* **ECS loggers:** plugins for your logging libraries that reformat your logs into ECS format.
+* **((apm-agent)) ECS reformatting:** Java, Ruby, and Python ((apm-agent))s automatically reformat application logs to ECS format without a logger.
+
+## ECS loggers
+
+ECS loggers reformat your application logs into ECS-compatible JSON, removing the need for manual parsing.
+ECS loggers require ((filebeat)) or ((agent)) configured to monitor and capture application logs.
+In addition, pairing ECS loggers with your framework's ((apm-agent)) allows you to correlate logs so you can easily view the logs that belong to a particular trace.
+
+### Get started
+
+For more information on adding an ECS logger to your application, refer to the guide for your framework:
+
+* [.NET](((ecs-logging-dotnet-ref))/setup.html)
+* Go: [zap](((ecs-logging-go-zap-ref))/setup.html), [logrus](((ecs-logging-go-logrus-ref))/setup.html)
+* [Java](((ecs-logging-java-ref))/setup.html)
+* Node.js: [morgan](((ecs-logging-nodejs-ref))/morgan.html), [pino](((ecs-logging-nodejs-ref))/pino.html), [winston](((ecs-logging-nodejs-ref))/winston.html)
+* [PHP](((ecs-logging-php-ref))/setup.html)
+* [Python](((ecs-logging-python-ref))/installation.html)
+* [Ruby](((ecs-logging-ruby-ref))/setup.html)
+
+
+
+## APM agent ECS reformatting
+
+Java, Ruby, and Python ((apm-agent))s can automatically reformat application logs to ECS format without an ECS logger or the need to modify your application. The ((apm-agent)) also allows for log correlation so you can easily view logs that belong to a particular trace.
+
+To set up log ECS reformatting:
+
+1. Enable ((apm-agent)) reformatting
+1. Ingest logs with ((filebeat)) or ((agent)).
+1. View logs in Logs Explorer
+
+### Enable log ECS reformatting
+
+Log ECS reformatting is controlled by the `log_ecs_reformatting` configuration option, and is disabled by default. Refer to the guide for your framework for information on enabling:
+
+* [Java](((apm-java-ref))/config-logging.html#config-log-ecs-reformatting)
+* [Ruby](((apm-ruby-ref))/configuration.html#config-log-ecs-formatting)
+* [Python](((apm-py-ref))/configuration.html#config-log_ecs_reformatting)
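+
+For example, with the Java agent this is typically a single JVM system property or environment variable. The following is a sketch only; the agent path, jar name, and service name are placeholders, and the value you choose (`SHADE`, `REPLACE`, or `OVERRIDE`) depends on how you want the original log output handled:
+
+```shell
+# Hypothetical service start: attach the Java APM agent and have it
+# rewrite application logs into ECS JSON.
+java -javaagent:/opt/elastic-apm-agent.jar \
+  -Delastic.apm.service_name=my-service \
+  -Delastic.apm.log_ecs_reformatting=OVERRIDE \
+  -jar my-service.jar
+```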
+
+### Ingest logs
+
+After enabling log ECS reformatting, send your application logs to your project using one of the following shipping tools:
+
+* **((filebeat)):** A lightweight data shipper that sends log data to your project.
+* **((agent)):** A single agent for logs, metrics, security data, and threat prevention. With Fleet, you can centrally manage ((agent)) policies and lifecycles directly from your project.
+
+### Ingest logs with ((filebeat))
+
+
+Use ((filebeat)) version 8.11+ for the best experience when ingesting logs with ((filebeat)).
+
+
+Follow these steps to ingest application logs with ((filebeat)).
+
+#### Step 1: Install ((filebeat))
+
+Install ((filebeat)) on the server you want to monitor by running the commands that align with your system:
+
+
+
+#### Step 2: Connect to your project
+
+Connect to your project using an API key to set up ((filebeat)). Set the following information in the `filebeat.yml` file:
+
+```yaml
+output.elasticsearch:
+ hosts: ["your-projects-elasticsearch-endpoint"]
+ api_key: "id:api_key"
+```
+
+1. Set the `hosts` to your project's ((es)) endpoint. Locate your project's endpoint by clicking the help icon and selecting **Endpoints**. Add the **((es)) endpoint** to your configuration.
+1. From **Developer Tools**, run the following command to create an API key that grants `manage` permissions for the cluster and the `filebeat-*` indices:
+
+ ```shell
+ POST /_security/api_key
+ {
+ "name": "filebeat_host001",
+ "role_descriptors": {
+ "filebeat_writer": {
+ "cluster": ["manage"],
+ "index": [
+ {
+ "names": ["filebeat-*"],
+ "privileges": ["manage"]
+ }
+ ]
+ }
+ }
+ }
+ ```
+
+ Refer to [Grant access using API keys](((filebeat-ref))/beats-api-keys.html) for more information.
+
+#### Step 3: Configure ((filebeat))
+
+Add the following configuration to your `filebeat.yml` file to start collecting log data.
+
+
+
+#### Step 4: Set up and start ((filebeat))
+
+From the ((filebeat)) installation directory, set the [index template](((ref))/index-templates.html) by running the command that aligns with your system:
+
+
+
+From the ((filebeat)) installation directory, start ((filebeat)) by running the command that aligns with your system:
+
+
+
+### Ingest logs with ((agent))
+
+Add the custom logs integration to ingest and centrally manage your logs using ((agent)) and ((fleet)):
+
+#### Step 1: Add the custom logs integration to your project
+
+To add the custom logs integration to your project:
+
+1. In your ((observability)) project, go to **Project Settings** → **Integrations**.
+1. Type `custom` in the search bar and select **Custom Logs**.
+1. Click **Install ((agent))** at the bottom of the page, and follow the instructions for your system to install the ((agent)). If you've already installed an ((agent)), you'll be taken directly to configuring your integration.
+1. After installing the ((agent)), click **Save and continue** to configure the integration from the **Add Custom Logs integration** page.
+1. Give your integration a meaningful name and description.
+1. Add the **Log file path**. For example, `/var/log/your-logs.log`.
+1. Under **Custom log file**, click **Advanced options**.
+
+1. In the **Processors** text box, add the following YAML configuration to add processors that enhance your data. See [processors](((filebeat-ref))/filtering-and-enhancing-data.html) to learn more.
+
+ ```yaml
+ processors:
+ - add_host_metadata: ~
+ - add_cloud_metadata: ~
+ - add_docker_metadata: ~
+ - add_kubernetes_metadata: ~
+ ```
+1. Under **Custom configurations**, add the following YAML configuration to collect data.
+
+ ```yaml
+ json:
+ overwrite_keys: true [^1]
+ add_error_key: true [^2]
+ expand_keys: true [^3]
+ keys_under_root: true [^4]
+ fields_under_root: true [^5]
+ fields:
+ service.name: your_service_name [^6]
+ service.version: your_service_version [^6]
+ service.environment: your_service_environment [^6]
+ ```
+ [^1]: Values from the decoded JSON object overwrite the fields that ((agent)) normally adds (type, source, offset, etc.) in case of conflicts.
+ [^2]: ((agent)) adds an "error.message" and "error.type: json" key in case of JSON unmarshalling errors.
+ [^3]: ((agent)) will recursively de-dot keys in the decoded JSON, and expand them into a hierarchical object structure.
+ [^4]: By default, the decoded JSON is placed under a "json" key in the output document. When set to `true`, the keys are copied to the top level of the output document.
+ [^5]: When set to `true`, custom fields are stored as top-level fields in the output document instead of being grouped under a fields sub-dictionary.
+ [^6]: The `service.name` (required), `service.version` (optional), and `service.environment` (optional) of the service you're collecting logs from, used for Log correlation.
+1. An agent policy is created that defines the data your ((agent)) collects. If you've previously installed an ((agent)) on the host you're collecting logs from, you can select the **Existing hosts** tab and use an existing agent policy.
+1. Click **Save and continue**.
+
+## View logs
+
+Use Logs Explorer to search, filter, and visualize your logs. Refer to the filter and aggregate logs documentation for more information.
\ No newline at end of file
diff --git a/docs/en/serverless/logging/filter-and-aggregate-logs.mdx b/docs/en/serverless/logging/filter-and-aggregate-logs.mdx
new file mode 100644
index 0000000000..7d003a7746
--- /dev/null
+++ b/docs/en/serverless/logging/filter-and-aggregate-logs.mdx
@@ -0,0 +1,342 @@
+---
+id: serverlessObservabilityFilterAndAggregateLogs
+slug: /serverless/observability/filter-and-aggregate-logs
+title: Filter and aggregate logs
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+Filter and aggregate your log data to find specific information, gain insight, and monitor your systems more efficiently. You can filter and aggregate based on structured fields like timestamps, log levels, and IP addresses that you've extracted from your log data.
+
+This guide shows you how to:
+
+* Filter logs: Narrow down your log data by applying specific criteria.
+* Aggregate logs: Analyze and summarize data to find patterns and gain insight.
+
+
+
+## Before you get started
+
+import Roles from '../partials/roles.mdx'
+
+
+
+The examples on this page use the following ingest pipeline and index template, which you can set in **Developer Tools**. If you haven't used ingest pipelines and index templates to parse your log data and extract structured fields yet, start with the Parse and organize logs documentation.
+
+Set the ingest pipeline with the following command:
+
+```console
+PUT _ingest/pipeline/logs-example-default
+{
+ "description": "Extracts the timestamp log level and host ip",
+ "processors": [
+ {
+ "dissect": {
+ "field": "message",
+ "pattern": "%{@timestamp} %{log.level} %{host.ip} %{message}"
+ }
+ }
+ ]
+}
+```
+
+Set the index template with the following command:
+
+```console
+PUT _index_template/logs-example-default-template
+{
+ "index_patterns": [ "logs-example-*" ],
+ "data_stream": { },
+ "priority": 500,
+ "template": {
+ "settings": {
+ "index.default_pipeline":"logs-example-default"
+ }
+ },
+ "composed_of": [
+ "logs-mappings",
+ "logs-settings",
+ "logs@custom",
+ "ecs@dynamic_templates"
+ ],
+ "ignore_missing_component_templates": ["logs@custom"]
+}
+```
+
+
+
+## Filter logs
+
+Filter your data using the fields you've extracted so you can focus on log data with specific log levels, timestamp ranges, or host IPs. You can filter your log data in different ways:
+
+- Filter logs in Logs Explorer: Filter and visualize log data in Logs Explorer.
+- Filter logs with Query DSL: Filter log data from Developer Tools using Query DSL.
+
+
+
+### Filter logs in Logs Explorer
+
+Logs Explorer is a tool that automatically provides views of your log data based on integrations and data streams. To open Logs Explorer, go to **Discover** and select the **Logs Explorer** tab.
+
+From Logs Explorer, you can use the [((kib)) Query Language (KQL)](((kibana-ref))/kuery-query.html) in the search bar to narrow down the log data that's displayed.
+For example, you might want to look into an event that occurred within a specific time range.
+
+Add some logs with varying timestamps and log levels to your data stream:
+
+1. In your Observability project, go to **Developer Tools**.
+1. In the **Console** tab, run the following command:
+
+```console
+POST logs-example-default/_bulk
+{ "create": {} }
+{ "message": "2023-09-15T08:15:20.234Z WARN 192.168.1.101 Disk usage exceeds 90%." }
+{ "create": {} }
+{ "message": "2023-09-14T10:30:45.789Z ERROR 192.168.1.102 Critical system failure detected." }
+{ "create": {} }
+{ "message": "2023-09-10T14:20:45.789Z ERROR 192.168.1.105 Database connection lost." }
+{ "create": {} }
+{ "message": "2023-09-20T09:40:32.345Z INFO 192.168.1.106 User logout initiated." }
+```
+
+For this example, let's look for logs with a `WARN` or `ERROR` log level that occurred on September 14th or 15th. From Logs Explorer:
+
+1. Add the following KQL query in the search bar to filter for logs with log levels of `WARN` or `ERROR`:
+
+ ```text
+ log.level: ("ERROR" or "WARN")
+ ```
+
+1. Click the current time range, select **Absolute**, and set the **Start date** to `Sep 14, 2023 @ 00:00:00.000`.
+
+
+
+1. Click the end of the current time range, select **Absolute**, and set the **End date** to `Sep 15, 2023 @ 23:59:59.999`.
+
+
+
+Under the **Documents** tab, you'll see the filtered log data matching your query.
+
+![](../images/logs-kql-filter.png)
+
+For more on using Logs Explorer, refer to the [Discover](((kibana-ref))/discover.html) documentation.
+
+
+
+### Filter logs with Query DSL
+
+[Query DSL](((ref))/query-dsl.html) is a JSON-based language that sends requests and retrieves data from indices and data streams. You can filter your log data using Query DSL from **Developer Tools**.
+
+For example, you might want to troubleshoot an issue that happened on a specific date or at a specific time. To do this, use a boolean query with a [range query](((ref))/query-dsl-range-query.html) to filter for the specific timestamp range and a [term query](((ref))/query-dsl-term-query.html) to filter for `WARN` and `ERROR` log levels.
+
+First, from **Developer Tools**, add some logs with varying timestamps and log levels to your data stream with the following command:
+
+```console
+POST logs-example-default/_bulk
+{ "create": {} }
+{ "message": "2023-09-15T08:15:20.234Z WARN 192.168.1.101 Disk usage exceeds 90%." }
+{ "create": {} }
+{ "message": "2023-09-14T10:30:45.789Z ERROR 192.168.1.102 Critical system failure detected." }
+{ "create": {} }
+{ "message": "2023-09-10T14:20:45.789Z ERROR 192.168.1.105 Database connection lost." }
+{ "create": {} }
+{ "message": "2023-09-20T09:40:32.345Z INFO 192.168.1.106 User logout initiated." }
+```
+
+Let's say you want to look into an event that occurred between September 14th and 15th. The following boolean query filters for logs with timestamps during those days that also have a log level of `ERROR` or `WARN`.
+
+```console
+POST /logs-example-default/_search
+{
+ "query": {
+ "bool": {
+ "filter": [
+ {
+ "range": {
+ "@timestamp": {
+ "gte": "2023-09-14T00:00:00",
+ "lte": "2023-09-15T23:59:59"
+ }
+ }
+ },
+ {
+ "terms": {
+ "log.level": ["WARN", "ERROR"]
+ }
+ }
+ ]
+ }
+ }
+}
+```
+
+The filtered results should show `WARN` and `ERROR` logs that occurred within the timestamp range:
+
+```JSON
+{
+ ...
+ "hits": {
+ ...
+ "hits": [
+ {
+ "_index": ".ds-logs-example-default-2023.09.25-000001",
+ "_id": "JkwPzooBTddK4OtTQToP",
+ "_score": 0,
+ "_source": {
+ "message": "192.168.1.101 Disk usage exceeds 90%.",
+ "log": {
+ "level": "WARN"
+ },
+ "@timestamp": "2023-09-15T08:15:20.234Z"
+ }
+ },
+ {
+ "_index": ".ds-logs-example-default-2023.09.25-000001",
+ "_id": "A5YSzooBMYFrNGNwH75O",
+ "_score": 0,
+ "_source": {
+ "message": "192.168.1.102 Critical system failure detected.",
+ "log": {
+ "level": "ERROR"
+ },
+ "@timestamp": "2023-09-14T10:30:45.789Z"
+ }
+ }
+ ]
+ }
+}
+```
+
+
+
+## Aggregate logs
+
+Use aggregation to analyze and summarize your log data to find patterns and gain insight. [Bucket aggregations](((ref))/search-aggregations-bucket.html) organize log data into meaningful groups, making it easier to identify patterns, trends, and anomalies within your logs.
+
+For example, you might want to understand error distribution by analyzing the count of logs per log level.
+
+First, from **Developer Tools**, add some logs with varying log levels to your data stream using the following command:
+
+```console
+POST logs-example-default/_bulk
+{ "create": {} }
+{ "message": "2023-09-15T08:15:20.234Z WARN 192.168.1.101 Disk usage exceeds 90%." }
+{ "create": {} }
+{ "message": "2023-09-14T10:30:45.789Z ERROR 192.168.1.102 Critical system failure detected." }
+{ "create": {} }
+{ "message": "2023-09-15T12:45:55.123Z INFO 192.168.1.103 Application successfully started." }
+{ "create": {} }
+{ "message": "2023-09-14T15:20:10.789Z WARN 192.168.1.104 Network latency exceeding threshold." }
+{ "create": {} }
+{ "message": "2023-09-10T14:20:45.789Z ERROR 192.168.1.105 Database connection lost." }
+{ "create": {} }
+{ "message": "2023-09-20T09:40:32.345Z INFO 192.168.1.106 User logout initiated." }
+{ "create": {} }
+{ "message": "2023-09-21T15:20:55.678Z DEBUG 192.168.1.102 Database connection established." }
+```
+
+Next, run this command to aggregate your log data using the `log.level` field:
+
+```console
+POST logs-example-default/_search?size=0&filter_path=aggregations
+{
+"size": 0, [^1]
+"aggs": {
+ "log_level_distribution": {
+ "terms": {
+ "field": "log.level"
+ }
+ }
+ }
+}
+```
+[^1]: Searches with an aggregation return both the query results and the aggregation, so you would see the logs matching the data and the aggregation. Setting `size` to `0` limits the results to aggregations.
+
+The results should show the number of logs in each log level:
+
+```JSON
+{
+ "aggregations": {
+ "error_distribution": {
+ "doc_count_error_upper_bound": 0,
+ "sum_other_doc_count": 0,
+ "buckets": [
+ {
+ "key": "ERROR",
+ "doc_count": 2
+ },
+ {
+ "key": "INFO",
+ "doc_count": 2
+ },
+ {
+ "key": "WARN",
+ "doc_count": 2
+ },
+ {
+ "key": "DEBUG",
+ "doc_count": 1
+ }
+ ]
+ }
+ }
+}
+```
+
+You can also combine aggregations and queries. For example, you might want to limit the scope of the previous aggregation by adding a range query:
+
+```console
+GET /logs-example-default/_search
+{
+ "size": 0,
+ "query": {
+ "range": {
+ "@timestamp": {
+ "gte": "2023-09-14T00:00:00",
+ "lte": "2023-09-15T23:59:59"
+ }
+ }
+ },
+ "aggs": {
+ "my-agg-name": {
+ "terms": {
+ "field": "log.level"
+ }
+ }
+ }
+}
+```
+
+The results should show an aggregate of logs that occurred within your timestamp range:
+
+```JSON
+{
+ ...
+ "hits": {
+ ...
+ "hits": []
+ },
+ "aggregations": {
+ "my-agg-name": {
+ "doc_count_error_upper_bound": 0,
+ "sum_other_doc_count": 0,
+ "buckets": [
+ {
+ "key": "WARN",
+ "doc_count": 2
+ },
+ {
+ "key": "ERROR",
+ "doc_count": 1
+ },
+ {
+ "key": "INFO",
+ "doc_count": 1
+ }
+ ]
+ }
+ }
+}
+```
+
+For more on aggregation types and available aggregations, refer to the [Aggregations](((ref))/search-aggregations.html) documentation.
diff --git a/docs/en/serverless/logging/get-started-with-logs.mdx b/docs/en/serverless/logging/get-started-with-logs.mdx
new file mode 100644
index 0000000000..a03f38ad12
--- /dev/null
+++ b/docs/en/serverless/logging/get-started-with-logs.mdx
@@ -0,0 +1,46 @@
+---
+id: serverlessObservabilityGetStartedWithLogs
+slug: /serverless/observability/get-started-with-logs
+title: Get started with system logs
+description: Learn how to onboard your system log data quickly.
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+import Roles from '../partials/roles.mdx'
+
+
+
+In this guide you'll learn how to onboard system log data from a machine or server,
+then observe the data in **Logs Explorer**.
+
+To onboard system log data:
+
+1. Create a new ((observability)) project, or open an existing one.
+1. In your ((observability)) project, go to **Add data**.
+1. Under **Collect and analyze logs**, click **Stream host system logs**.
+When the page loads, the system integration is installed automatically, and a new API key is created.
+Make sure you copy the API key and store it in a secure location.
+1. Follow the in-product steps to install and configure the ((agent)).
+Notice that you can choose to download the agent's config automatically to avoid adding it manually.
+
+After the agent is installed and successfully streaming log data, you can view the data in the UI:
+
+1. From the navigation menu, go to **Discover** and select the **Logs Explorer** tab. The view shows all log datasets.
+Notice you can add fields, change the view, expand a document to see details,
+and perform other actions to explore your data.
+1. Click **All log datasets** and select **System** → **syslog** to show syslog logs.
+
+![Screen capture of the Logs Explorer showing syslog dataset selected](../images/log-explorer-select-syslogs.png)
+
+## Next steps
+
+Now that you've added system logs and explored your data,
+learn how to onboard other types of data:
+
+*
+*
+
+To onboard other types of data, select **Add Data** from the main menu.
+
diff --git a/docs/en/serverless/logging/log-monitoring.mdx b/docs/en/serverless/logging/log-monitoring.mdx
new file mode 100644
index 0000000000..4b95b5e6dd
--- /dev/null
+++ b/docs/en/serverless/logging/log-monitoring.mdx
@@ -0,0 +1,91 @@
+---
+id: serverlessObservabilityLogMonitoring
+slug: /serverless/observability/log-monitoring
+title: Log monitoring
+description: Use Elastic to deploy and manage logs at a petabyte scale, and get insights from your logs in minutes.
+tags: [ 'serverless', 'observability', 'overview' ]
+---
+
+
+
+Elastic Observability allows you to deploy and manage logs at a petabyte scale, giving you insights into your logs in minutes. You can also search across your logs in one place, troubleshoot in real time, and detect patterns and outliers with categorization and anomaly detection. For more information, refer to the following links:
+
+- Get started with system logs: Onboard system log data from a machine or server.
+- Stream any log file: Send log files to your Observability project using a standalone ((agent)).
+- Parse and route logs: Parse your log data and extract structured fields that you can use to analyze your data.
+- Filter and aggregate logs: Filter and aggregate your log data to find specific information, gain insight, and monitor your systems more efficiently.
+- Explore logs: Find information on visualizing and analyzing logs.
+- Run pattern analysis on log data: Find patterns in unstructured log messages and make it easier to examine your data.
+- Troubleshoot logs: Find solutions for errors you might encounter while onboarding your logs.
+
+## Send logs data to your project
+
+You can send logs data to your project in different ways depending on your needs:
+
+- ((agent))
+- ((filebeat))
+
+When choosing between ((agent)) and ((filebeat)), consider the differences in features and functionality between the two options.
+See [((beats)) and ((agent)) capabilities](((fleet-guide))/beats-agent-comparison.html) for more information on which option best fits your situation.
+
+### ((agent))
+
+((agent)) uses [integrations](https://www.elastic.co/integrations/data-integrations) to ingest logs from Kubernetes, MySQL, and many more data sources.
+You have the following options when installing and managing an ((agent)):
+
+#### ((fleet))-managed ((agent))
+
+Install an ((agent)) and use ((fleet)) to define, configure, and manage your agents in a central location.
+
+See [install ((fleet))-managed ((agent))](((fleet-guide))/install-fleet-managed-elastic-agent.html).
+
+#### Standalone ((agent))
+
+Install an ((agent)) and manually configure it locally on the system where it’s installed.
+You are responsible for managing and upgrading the agents.
+
+See [install standalone ((agent))](((fleet-guide))/install-standalone-elastic-agent.html).
+
+#### ((agent)) in a containerized environment
+
+Run an ((agent)) inside of a container — either with ((fleet-server)) or standalone.
+
+See [install ((agent)) in containers](((fleet-guide))/install-elastic-agents-in-containers.html).
+
+### ((filebeat))
+
+((filebeat)) is a lightweight shipper for forwarding and centralizing log data.
+Installed as a service on your servers, ((filebeat)) monitors the log files or locations that you specify, collects log events, and forwards them to your Observability project for indexing.
+
+- [((filebeat)) overview](((filebeat-ref))/filebeat-overview.html): General information on ((filebeat)) and how it works.
+- [((filebeat)) quick start](((filebeat-ref))/filebeat-installation-configuration.html): Basic installation instructions to get you started.
+- [Set up and run ((filebeat))](((filebeat-ref))/setting-up-and-running.html): Information on how to install, set up, and run ((filebeat)).
+
+## Configure logs
+
+The following resources provide information on configuring your logs:
+
+- [Data streams](((ref))/data-streams.html): Efficiently store append-only time series data in multiple backing indices partitioned by time and size.
+- [Data views](((kibana-ref))/data-views.html): Query log entries from the data streams of specific datasets or namespaces.
+- [Index lifecycle management](((ref))/example-using-index-lifecycle-policy.html): Configure the built-in logs policy based on your application's performance, resilience, and retention requirements.
+- [Ingest pipeline](((ref))/ingest.html): Parse and transform log entries into a suitable format before indexing.
+- [Mapping](((ref))/mapping.html): Define how data is stored and indexed.
+
+## View and monitor logs
+
+Use **Logs Explorer** to search, filter, and tail all your logs ingested into your project in one place.
+
+The following resources provide information on viewing and monitoring your logs:
+
+- Discover and explore: Discover and explore all of the log events flowing in from your servers, virtual machines, and containers in a centralized view.
+- Detect log anomalies: Use ((ml)) to detect log anomalies automatically.
+
+## Application logs
+
+Application logs provide valuable insight into events that have occurred within your services and applications.
+See Application logs.
+
+{/* ## Create a logs threshold alert
+
+You can create a rule to send an alert when the log aggregation exceeds a threshold.
+See Create a logs threshold rule. */}
diff --git a/docs/en/serverless/logging/parse-log-data.mdx b/docs/en/serverless/logging/parse-log-data.mdx
new file mode 100644
index 0000000000..7d8a40880c
--- /dev/null
+++ b/docs/en/serverless/logging/parse-log-data.mdx
@@ -0,0 +1,845 @@
+---
+id: serverlessObservabilityParseLogData
+slug: /serverless/observability/parse-log-data
+title: Parse and route logs
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+import Roles from '../partials/roles.mdx'
+
+
+
+If your log data is unstructured or semi-structured, you can parse it and break it into meaningful fields. You can use those fields to explore and analyze your data. For example, you can find logs within a specific timestamp range or filter logs by log level to focus on potential issues.
+
+After parsing, you can use the structured fields to further organize your logs by configuring a reroute processor to send specific logs to different target data streams.
+
+Refer to the following sections for more on parsing and organizing your log data:
+
+* Extract structured fields: Extract structured fields like timestamps, log levels, or IP addresses to make querying and filtering your data easier.
+* Reroute log data to specific data streams: Route data from the generic data stream to a target data stream for more granular control over data retention, permissions, and processing.
+
+## Extract structured fields
+
+Make your logs more useful by extracting structured fields from your unstructured log data. Extracting structured fields makes it easier to search, analyze, and filter your log data.
+
+Follow the steps below to see how the following unstructured log data is indexed by default:
+
+```log
+2023-08-08T13:45:12.123Z WARN 192.168.1.101 Disk usage exceeds 90%.
+```
+
+Start by storing the document in the `logs-example-default` data stream:
+
+1. In your Observability project, go to **Developer Tools**.
+1. In the **Console** tab, add the example log to your project using the following command:
+
+ ```console
+ POST logs-example-default/_doc
+ {
+ "message": "2023-08-08T13:45:12.123Z WARN 192.168.1.101 Disk usage exceeds 90%."
+ }
+ ```
+
+1. Then, you can retrieve the document with the following search:
+
+ ```console
+ GET /logs-example-default/_search
+ ```
+
+The results should look like this:
+
+```json
+{
+ ...
+ "hits": {
+ ...
+ "hits": [
+ {
+ "_index": ".ds-logs-example-default-2023.08.09-000001",
+ ...
+ "_source": {
+ "message": "2023-08-08T13:45:12.123Z WARN 192.168.1.101 Disk usage exceeds 90%.",
+ "@timestamp": "2023-08-09T17:19:27.73312243Z"
+ }
+ }
+ ]
+ }
+}
+```
+
+Your project indexes the `message` field by default and adds a `@timestamp` field. Since there was no timestamp set, it's set to `now`.
+At this point, you can search for phrases in the `message` field like `WARN` or `Disk usage exceeds`.
+For example, run the following command to search for the phrase `WARN` in the log's `message` field:
+
+```console
+GET logs-example-default/_search
+{
+ "query": {
+ "match": {
+ "message": {
+ "query": "WARN"
+ }
+ }
+ }
+}
+```
+
+While you can search for phrases in the `message` field, you can't use this field to filter log data. Your message, however, contains all of the following potential fields you can extract and use to filter and aggregate your log data:
+
+- **@timestamp** (`2023-08-08T13:45:12.123Z`): Extracting this field lets you sort logs by date and time. This is helpful when you want to view your logs in the order that they occurred or identify when issues happened.
+- **log.level** (`WARN`): Extracting this field lets you filter logs by severity. This is helpful if you want to focus on high-severity WARN or ERROR-level logs, and reduce noise by filtering out low-severity INFO-level logs.
+- **host.ip** (`192.168.1.101`): Extracting this field lets you filter logs by the host IP addresses. This is helpful if you want to focus on specific hosts that you’re having issues with or if you want to find disparities between hosts.
+- **message** (`Disk usage exceeds 90%.`): You can search for phrases or words in the message field.
+
+
+These fields are part of the [Elastic Common Schema (ECS)](((ecs-ref))/ecs-reference.html). The ECS defines a common set of fields that you can use across your project when storing data, including log and metric data.
+
+
+### Extract the `@timestamp` field
+
+When you added the log to your project in the previous section, the `@timestamp` field showed when the log was added. The timestamp showing when the log actually occurred was in the unstructured `message` field:
+
+```json
+ ...
+ "_source": {
+ "message": "2023-08-08T13:45:12.123Z WARN 192.168.1.101 Disk usage exceeds 90%.", [^1]
+ "@timestamp": "2023-08-09T17:19:27.73312243Z" [^2]
+ }
+ ...
+```
+[^1]: The timestamp in the `message` field shows when the log occurred.
+[^2]: The timestamp in the `@timestamp` field shows when the log was added to your project.
+
+When looking into issues, you want to filter for logs by when the issue occurred not when the log was added to your project.
+To do this, extract the timestamp from the unstructured `message` field to the structured `@timestamp` field by completing the following:
+
+1. Use an ingest pipeline to extract the `@timestamp` field
+1. Test the pipeline with the simulate pipeline API
+1. Configure a data stream with an index template
+1. Create a data stream
+
+#### Use an ingest pipeline to extract the `@timestamp` field
+
+Ingest pipelines consist of a series of processors that perform common transformations on incoming documents before they are indexed.
+To extract the `@timestamp` field from the example log, use an ingest pipeline with a [dissect processor](((ref))/dissect-processor.html).
+The dissect processor extracts structured fields from unstructured log messages based on a pattern you set.
+
+Your project can parse string timestamps that are in `yyyy-MM-dd'T'HH:mm:ss.SSSZ` and `yyyy-MM-dd` formats into date fields.
+Since the log example's timestamp is in one of these formats, you don't need additional processors.
+More complex or nonstandard timestamps require a [date processor](((ref))/date-processor.html) to parse the timestamp into a date field.
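+
+For example, if your logs used a non-ISO timestamp such as `08/Aug/2023:13:45:12`, a pipeline along the following lines could dissect it into a temporary field and convert it with a date processor. This is a sketch only; the pipeline name, temporary field, and date format are assumptions for illustration:
+
+```console
+PUT _ingest/pipeline/logs-nonstandard-example
+{
+  "description": "Example: parse a non-ISO timestamp with a date processor",
+  "processors": [
+    {
+      "dissect": {
+        "field": "message",
+        "pattern": "%{_tmp_timestamp} %{message}"
+      }
+    },
+    {
+      "date": {
+        "field": "_tmp_timestamp",
+        "formats": ["dd/MMM/yyyy:HH:mm:ss"],
+        "target_field": "@timestamp"
+      }
+    },
+    {
+      "remove": {
+        "field": "_tmp_timestamp"
+      }
+    }
+  ]
+}
+```
+
+The example log in this guide already uses an ISO format, so the rest of this page sticks with the dissect-only pipeline.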
+
+Use the following command to extract the timestamp from the `message` field into the `@timestamp` field:
+
+```console
+PUT _ingest/pipeline/logs-example-default
+{
+ "description": "Extracts the timestamp",
+ "processors": [
+ {
+ "dissect": {
+ "field": "message",
+ "pattern": "%{@timestamp} %{message}"
+ }
+ }
+ ]
+}
+```
+
+The previous command sets the following values for your ingest pipeline:
+
+- `_ingest/pipeline/logs-example-default`: The name of the pipeline, `logs-example-default`, needs to match the name of your data stream. You'll set up your data stream in the next section. For more information, refer to the [data stream naming scheme](((fleet-guide))/data-streams.html#data-streams-naming-scheme).
+- `field`: The field you're extracting data from, `message` in this case.
+- `pattern`: The pattern of the elements in your log data. The `%{@timestamp} %{message}` pattern extracts the timestamp, `2023-08-08T13:45:12.123Z`, to the `@timestamp` field, while the rest of the message, `WARN 192.168.1.101 Disk usage exceeds 90%.`, stays in the `message` field. The dissect processor looks for the space as a separator defined by the pattern.
+
+#### Test the pipeline with the simulate pipeline API
+
+The [simulate pipeline API](((ref))/simulate-pipeline-api.html#ingest-verbose-param) runs the ingest pipeline without storing any documents.
+This lets you verify your pipeline works using multiple documents.
+
+Run the following command to test your ingest pipeline with the simulate pipeline API.
+
+```console
+POST _ingest/pipeline/logs-example-default/_simulate
+{
+ "docs": [
+ {
+ "_source": {
+ "message": "2023-08-08T13:45:12.123Z WARN 192.168.1.101 Disk usage exceeds 90%."
+ }
+ }
+ ]
+}
+```
+
+The results should show the `@timestamp` field extracted from the `message` field:
+
+```json
+{
+ "docs": [
+ {
+ "doc": {
+ "_index": "_index",
+ "_id": "_id",
+ "_version": "-3",
+ "_source": {
+ "message": "WARN 192.168.1.101 Disk usage exceeds 90%.",
+ "@timestamp": "2023-08-08T13:45:12.123Z"
+ },
+ ...
+ }
+ }
+ ]
+}
+```
+
+
+Make sure you've created the ingest pipeline using the `PUT` command in the previous section before using the simulate pipeline API.
+
+
+#### Configure a data stream with an index template
+
+After creating your ingest pipeline, run the following command to create an index template to configure your data stream's backing indices:
+
+```console
+PUT _index_template/logs-example-default-template
+{
+ "index_patterns": [ "logs-example-*" ],
+ "data_stream": { },
+ "priority": 500,
+ "template": {
+ "settings": {
+ "index.default_pipeline":"logs-example-default"
+ }
+ },
+ "composed_of": [
+ "logs@mappings",
+ "logs@settings",
+ "logs@custom",
+ "ecs@mappings"
+ ],
+ "ignore_missing_component_templates": ["logs@custom"]
+}
+```
+
+The previous command sets the following values for your index template:
+
+- `index_patterns`: Needs to match your log data stream. The naming convention for data streams is `{type}-{dataset}-{namespace}`. In this example, your logs data stream is named `logs-example-*`. Data that matches this pattern will go through your pipeline.
+- `data_stream`: Enables data streams.
+- `priority`: Sets the priority of your index templates. Index templates with a higher priority take precedence. If a data stream matches multiple index templates, your project uses the template with the higher priority. Built-in templates have a priority of `200`, so use a priority higher than `200` for custom templates.
+- `index.default_pipeline`: The name of your ingest pipeline. `logs-example-default` in this case.
+- `composed_of`: Here you can set component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. Elastic has several built-in templates to help when ingesting your log data.
+
+The example index template above sets the following component templates:
+
+- `logs@mappings`: general mappings for log data streams that include disabling automatic date detection from `string` fields and specifying mappings for [`data_stream` ECS fields](((ecs-ref))/ecs-data_stream.html).
+- `logs@settings`: general settings for log data streams including the following:
+ * The default lifecycle policy that rolls over when the primary shard reaches 50 GB or after 30 days.
+ * The default pipeline uses the ingest timestamp if there is no specified `@timestamp` and places a hook for the `logs@custom` pipeline. If a `logs@custom` pipeline is installed, it's applied to logs ingested into this data stream.
+ * Sets the [`ignore_malformed`](((ref))/ignore-malformed.html) flag to `true`. When ingesting a large batch of log data, a single malformed field like an IP address can cause the entire batch to fail. When set to true, malformed fields with a mapping type that supports this flag are still processed.
+- `logs@custom`: a predefined component template that is not installed by default. Use this name to install a custom component template to override or extend any of the default mappings or settings (see the sketch after this list).
+- `ecs@mappings`: dynamic templates that automatically ensure your data stream mappings comply with the [Elastic Common Schema (ECS)](((ecs-ref))/ecs-reference.html).
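+
+For example, to extend the default mappings you could install your own `logs@custom` component template. The following is a sketch only; `my_custom_field` is a made-up field used for illustration:
+
+```console
+PUT _component_template/logs@custom
+{
+  "template": {
+    "mappings": {
+      "properties": {
+        "my_custom_field": {
+          "type": "keyword"
+        }
+      }
+    }
+  }
+}
+```
+
+Because the index template lists `logs@custom` under `composed_of` and `ignore_missing_component_templates`, new backing indices created after this template exists will pick it up. Keep in mind that any index template referencing `logs@custom`, including the built-in `logs` template, will also use it.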
+
+#### Create a data stream
+
+Create your data stream using the [data stream naming scheme](((fleet-guide))/data-streams.html#data-streams-naming-scheme). Name your data stream to match the name of your ingest pipeline, `logs-example-default` in this case. Post the example log to your data stream with this command:
+
+```console
+POST logs-example-default/_doc
+{
+ "message": "2023-08-08T13:45:12.123Z WARN 192.168.1.101 Disk usage exceeds 90%."
+}
+```
+
+View your documents using this command:
+
+```console
+GET /logs-example-default/_search
+```
+
+You should see the pipeline has extracted the `@timestamp` field:
+
+```json
+{
+ ...
+ {
+ ...
+ "hits": {
+ ...
+ "hits": [
+ {
+ "_index": ".ds-logs-example-default-2023.08.09-000001",
+ "_id": "RsWy3IkB8yCtA5VGOKLf",
+ "_score": 1,
+ "_source": {
+ "message": "WARN 192.168.1.101 Disk usage exceeds 90%.",
+ "@timestamp": "2023-08-08T13:45:12.123Z" [^1]
+ }
+ }
+ ]
+ }
+ }
+}
+```
+[^1]: The extracted `@timestamp` field.
+
+You can now use the `@timestamp` field to sort your logs by the date and time they happened.
+
+#### Troubleshoot the `@timestamp` field
+
+Check the following common issues and solutions with timestamps:
+
+- **Timestamp failure:** If your data has inconsistent date formats, set `ignore_failure` to `true` for your date processor. This processes logs with correctly formatted dates and ignores those with issues.
+- **Incorrect timezone:** Set your timezone using the `timezone` option on the [date processor](((ref))/date-processor.html). This option and `ignore_failure` are shown in the example after this list.
+- **Incorrect timestamp format:** Your timestamp can be a Java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. For more information on timestamp formats, refer to the [mapping date format](((ref))/mapping-date-format.html).
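+
+For example, you could check how these options behave with a simulate request like the following. This is a sketch only; the `event_time` field, date format, and timezone are assumptions for illustration:
+
+```console
+POST _ingest/pipeline/_simulate
+{
+  "pipeline": {
+    "processors": [
+      {
+        "date": {
+          "field": "event_time",
+          "formats": ["yyyy-MM-dd HH:mm:ss"],
+          "timezone": "Europe/Berlin",
+          "ignore_failure": true
+        }
+      }
+    ]
+  },
+  "docs": [
+    { "_source": { "event_time": "2023-08-08 13:45:12" } },
+    { "_source": { "event_time": "not-a-date" } }
+  ]
+}
+```
+
+The first document comes back with a parsed `@timestamp` (interpreted in the `Europe/Berlin` timezone), while the second is returned unchanged instead of failing because `ignore_failure` is set to `true`.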
+
+### Extract the `log.level` field
+
+Extracting the `log.level` field lets you filter by severity and focus on critical issues. This section shows you how to extract the `log.level` field from this example log:
+
+```log
+2023-08-08T13:45:12.123Z WARN 192.168.1.101 Disk usage exceeds 90%.
+```
+
+To extract and use the `log.level` field:
+
+1. Add the `log.level` field to the dissect processor pattern in your ingest pipeline.
+1. Test the pipeline with the simulate API.
+1. Query your logs based on the `log.level` field.
+
+#### Add `log.level` to your ingest pipeline
+
+Add the `%{log.level}` option to the dissect processor pattern in the ingest pipeline you created in the Extract the `@timestamp` field section with this command:
+
+```console
+PUT _ingest/pipeline/logs-example-default
+{
+ "description": "Extracts the timestamp and log level",
+ "processors": [
+ {
+ "dissect": {
+ "field": "message",
+ "pattern": "%{@timestamp} %{log.level} %{message}"
+ }
+ }
+ ]
+}
+```
+
+Now your pipeline will extract these fields:
+
+- The `@timestamp` field: `2023-08-08T13:45:12.123Z`
+- The `log.level` field: `WARN`
+- The `message` field: `192.168.1.101 Disk usage exceeds 90%.`
+
+In addition to setting an ingest pipeline, you need to set an index template. Use the index template created in the Extract the `@timestamp` field section.
+
+#### Test the pipeline with the simulate API
+
+Test that your ingest pipeline works as expected with the [simulate pipeline API](((ref))/simulate-pipeline-api.html#ingest-verbose-param):
+
+```console
+POST _ingest/pipeline/logs-example-default/_simulate
+{
+ "docs": [
+ {
+ "_source": {
+ "message": "2023-08-08T13:45:12.123Z WARN 192.168.1.101 Disk usage exceeds 90%."
+ }
+ }
+ ]
+}
+```
+
+The results should show the `@timestamp` and the `log.level` fields extracted from the `message` field:
+
+```json
+{
+ "docs": [
+ {
+ "doc": {
+ "_index": "_index",
+ "_id": "_id",
+ "_version": "-3",
+ "_source": {
+ "message": "192.168.1.101 Disk usage exceeds 90%.",
+ "log": {
+ "level": "WARN"
+ },
+ "@timestamp": "2023-8-08T13:45:12.123Z",
+ },
+ ...
+ }
+ }
+ ]
+}
+```
+
+#### Query logs based on `log.level`
+
+Once you've extracted the `log.level` field, you can query for high-severity logs like `WARN` and `ERROR`, which may need immediate attention, and filter out less critical `INFO` and `DEBUG` logs.
+
+Let's say you have the following logs with varying severities:
+
+```log
+2023-08-08T13:45:12.123Z WARN 192.168.1.101 Disk usage exceeds 90%.
+2023-08-08T13:45:14.003Z ERROR 192.168.1.103 Database connection failed.
+2023-08-08T13:45:15.004Z DEBUG 192.168.1.104 Debugging connection issue.
+2023-08-08T13:45:16.005Z INFO 192.168.1.102 User changed profile picture.
+```
+
+Add them to your data stream using this command:
+
+```console
+POST logs-example-default/_bulk
+{ "create": {} }
+{ "message": "2023-08-08T13:45:12.123Z WARN 192.168.1.101 Disk usage exceeds 90%." }
+{ "create": {} }
+{ "message": "2023-08-08T13:45:14.003Z ERROR 192.168.1.103 Database connection failed." }
+{ "create": {} }
+{ "message": "2023-08-08T13:45:15.004Z DEBUG 192.168.1.104 Debugging connection issue." }
+{ "create": {} }
+{ "message": "2023-08-08T13:45:16.005Z INFO 192.168.1.102 User changed profile picture." }
+```
+
+Then, query for documents with a log level of `WARN` or `ERROR` with this command:
+
+```console
+GET logs-example-default/_search
+{
+ "query": {
+ "terms": {
+ "log.level": ["WARN", "ERROR"]
+ }
+ }
+}
+```
+
+The results should show only the high-severity logs:
+
+```json
+{
+...
+ },
+ "hits": {
+ ...
+ "hits": [
+ {
+ "_index": ".ds-logs-example-default-2023.08.14-000001",
+ "_id": "3TcZ-4kB3FafvEVY4yKx",
+ "_score": 1,
+ "_source": {
+ "message": "192.168.1.101 Disk usage exceeds 90%.",
+ "log": {
+ "level": "WARN"
+ },
+ "@timestamp": "2023-08-08T13:45:12.123Z"
+ }
+ },
+ {
+ "_index": ".ds-logs-example-default-2023.08.14-000001",
+ "_id": "3jcZ-4kB3FafvEVY4yKx",
+ "_score": 1,
+ "_source": {
+ "message": "192.168.1.103 Database connection failed.",
+ "log": {
+ "level": "ERROR"
+ },
+ "@timestamp": "2023-08-08T13:45:14.003Z"
+ }
+ }
+ ]
+ }
+}
+```
+
+### Extract the `host.ip` field
+
+Extracting the `host.ip` field lets you filter logs by host IP addresses allowing you to focus on specific hosts that you're having issues with or find disparities between hosts.
+
+The `host.ip` field is part of the [Elastic Common Schema (ECS)](((ecs-ref))/ecs-reference.html). Through the ECS, the `host.ip` field is mapped as an [`ip` field type](((ref))/ip.html). `ip` field types allow range queries so you can find logs with IP addresses in a specific range. You can also query `ip` field types using Classless Inter-Domain Routing (CIDR) notation to find logs from a particular network or subnet.
+
+This section shows you how to extract the `host.ip` field from the following example logs and query based on the extracted fields:
+
+```log
+2023-08-08T13:45:12.123Z WARN 192.168.1.101 Disk usage exceeds 90%.
+2023-08-08T13:45:14.003Z ERROR 192.168.1.103 Database connection failed.
+2023-08-08T13:45:15.004Z DEBUG 192.168.1.104 Debugging connection issue.
+2023-08-08T13:45:16.005Z INFO 192.168.1.102 User changed profile picture.
+```
+
+To extract and use the `host.ip` field:
+
+1. Add the `host.ip` field to your dissect processor in your ingest pipeline.
+1. Test the pipeline with the simulate API.
+1. Query your logs based on the `host.ip` field.
+
+#### Add `host.ip` to your ingest pipeline
+
+Add the `%{host.ip}` option to the dissect processor pattern in the ingest pipeline you created in the Extract the `@timestamp` field section:
+
+```console
+PUT _ingest/pipeline/logs-example-default
+{
+ "description": "Extracts the timestamp log level and host ip",
+ "processors": [
+ {
+ "dissect": {
+ "field": "message",
+ "pattern": "%{@timestamp} %{log.level} %{host.ip} %{message}"
+ }
+ }
+ ]
+}
+```
+
+Your pipeline will extract these fields:
+
+- The `@timestamp` field: `2023-08-08T13:45:12.123Z`
+- The `log.level` field: `WARN`
+- The `host.ip` field: `192.168.1.101`
+- The `message` field: `Disk usage exceeds 90%.`
+
+In addition to setting an ingest pipeline, you need to set an index template. Use the index template created in the Extract the `@timestamp` field section.
+
+#### Test the pipeline with the simulate API
+
+Test that your ingest pipeline works as expected with the [simulate pipeline API](((ref))/simulate-pipeline-api.html#ingest-verbose-param):
+
+```console
+POST _ingest/pipeline/logs-example-default/_simulate
+{
+ "docs": [
+ {
+ "_source": {
+ "message": "2023-08-08T13:45:12.123Z WARN 192.168.1.101 Disk usage exceeds 90%."
+ }
+ }
+ ]
+}
+```
+
+The results should show the `host.ip`, `@timestamp`, and `log.level` fields extracted from the `message` field:
+
+```json
+{
+ "docs": [
+ {
+ "doc": {
+ ...
+ "_source": {
+ "host": {
+ "ip": "192.168.1.101"
+ },
+ "@timestamp": "2023-08-08T13:45:12.123Z",
+ "message": "Disk usage exceeds 90%.",
+ "log": {
+ "level": "WARN"
+ }
+ },
+ ...
+ }
+ }
+ ]
+}
+```
+
+#### Query logs based on `host.ip`
+
+You can query your logs based on the `host.ip` field in different ways, including using CIDR notation and range queries.
+
+Before querying your logs, add them to your data stream using this command:
+
+```console
+POST logs-example-default/_bulk
+{ "create": {} }
+{ "message": "2023-08-08T13:45:12.123Z WARN 192.168.1.101 Disk usage exceeds 90%." }
+{ "create": {} }
+{ "message": "2023-08-08T13:45:14.003Z ERROR 192.168.1.103 Database connection failed." }
+{ "create": {} }
+{ "message": "2023-08-08T13:45:15.004Z DEBUG 192.168.1.104 Debugging connection issue." }
+{ "create": {} }
+{ "message": "2023-08-08T13:45:16.005Z INFO 192.168.1.102 User changed profile picture." }
+```
+
+##### CIDR notation
+
+You can use [CIDR notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation) to query your log data using a block of IP addresses that fall within a certain network segment. CIDR notation uses the format `[IP address]/[prefix length]`. The following command queries IP addresses in the `192.168.1.0/24` subnet, meaning IP addresses from `192.168.1.0` to `192.168.1.255`.
+
+```console
+GET logs-example-default/_search
+{
+ "query": {
+ "term": {
+ "host.ip": "192.168.1.0/24"
+ }
+ }
+}
+```
+
+Because all of the example logs are in this range, you'll get the following results:
+
+```json
+{
+ ...
+ },
+ "hits": {
+ ...
+ {
+ "_index": ".ds-logs-example-default-2023.08.16-000001",
+ "_id": "ak4oAIoBl7fe5ItIixuB",
+ "_score": 1,
+ "_source": {
+ "host": {
+ "ip": "192.168.1.101"
+ },
+ "@timestamp": "2023-08-08T13:45:12.123Z",
+ "message": "Disk usage exceeds 90%.",
+ "log": {
+ "level": "WARN"
+ }
+ }
+ },
+ {
+ "_index": ".ds-logs-example-default-2023.08.16-000001",
+ "_id": "a04oAIoBl7fe5ItIixuC",
+ "_score": 1,
+ "_source": {
+ "host": {
+ "ip": "192.168.1.103"
+ },
+ "@timestamp": "2023-08-08T13:45:14.003Z",
+ "message": "Database connection failed.",
+ "log": {
+ "level": "ERROR"
+ }
+ }
+ },
+ {
+ "_index": ".ds-logs-example-default-2023.08.16-000001",
+ "_id": "bE4oAIoBl7fe5ItIixuC",
+ "_score": 1,
+ "_source": {
+ "host": {
+ "ip": "192.168.1.104"
+ },
+ "@timestamp": "2023-08-08T13:45:15.004Z",
+ "message": "Debugging connection issue.",
+ "log": {
+ "level": "DEBUG"
+ }
+ }
+ },
+ {
+ "_index": ".ds-logs-example-default-2023.08.16-000001",
+ "_id": "bU4oAIoBl7fe5ItIixuC",
+ "_score": 1,
+ "_source": {
+ "host": {
+ "ip": "192.168.1.102"
+ },
+ "@timestamp": "2023-08-08T13:45:16.005Z",
+ "message": "User changed profile picture.",
+ "log": {
+ "level": "INFO"
+ }
+ }
+ }
+ ]
+ }
+}
+```
+
+##### Range queries
+
+Use [range queries](((ref))/query-dsl-range-query.html) to query logs in a specific range.
+
+The following command searches for IP addresses greater than or equal to `192.168.1.100` and less than or equal to `192.168.1.102`.
+
+```console
+GET logs-example-default/_search
+{
+ "query": {
+ "range": {
+ "host.ip": {
+ "gte": "192.168.1.100", [^1]
+ "lte": "192.168.1.102" [^2]
+ }
+ }
+ }
+}
+```
+[^1]: Greater than or equal to `192.168.1.100`.
+[^2]: Less than or equal to `192.168.1.102`.
+
+You'll get the following results only showing logs in the range you've set:
+
+```json
+{
+ ...
+ },
+ "hits": {
+ ...
+ {
+ "_index": ".ds-logs-example-default-2023.08.16-000001",
+ "_id": "ak4oAIoBl7fe5ItIixuB",
+ "_score": 1,
+ "_source": {
+ "host": {
+ "ip": "192.168.1.101"
+ },
+ "@timestamp": "2023-08-08T13:45:12.123Z",
+ "message": "Disk usage exceeds 90%.",
+ "log": {
+ "level": "WARN"
+ }
+ }
+ },
+ {
+ "_index": ".ds-logs-example-default-2023.08.16-000001",
+ "_id": "bU4oAIoBl7fe5ItIixuC",
+ "_score": 1,
+ "_source": {
+ "host": {
+ "ip": "192.168.1.102"
+ },
+ "@timestamp": "2023-08-08T13:45:16.005Z",
+ "message": "User changed profile picture.",
+ "log": {
+ "level": "INFO"
+ }
+ }
+ }
+ ]
+ }
+}
+```
+
+## Reroute log data to specific data streams
+
+By default, an ingest pipeline sends your log data to a single data stream. To simplify log data management, use a [reroute processor](((ref))/reroute-processor.html) to route data from the generic data stream to a target data stream. For example, you might want to send high-severity logs to a specific data stream to help with categorization.
+
+This section shows you how to use a reroute processor to send the high-severity logs (`WARN` or `ERROR`) from the following example logs to a specific data stream and keep the regular logs (`DEBUG` and `INFO`) in the default data stream:
+
+```log
+2023-08-08T13:45:12.123Z WARN 192.168.1.101 Disk usage exceeds 90%.
+2023-08-08T13:45:14.003Z ERROR 192.168.1.103 Database connection failed.
+2023-08-08T13:45:15.004Z DEBUG 192.168.1.104 Debugging connection issue.
+2023-08-08T13:45:16.005Z INFO 192.168.1.102 User changed profile picture.
+```
+
+
+When routing data to different data streams, we recommend picking a field with a limited number of distinct values to prevent an excessive increase in the number of data streams. For more details, refer to the [Size your shards](((ref))/size-your-shards.html) documentation.
+
+
+To use a reroute processor:
+
+1. Add a reroute processor to your ingest pipeline.
+1. Add the example logs to your data stream.
+1. Query your logs and verify the high-severity logs were routed to the new data stream.
+
+### Add a reroute processor to the ingest pipeline
+
+Add a reroute processor to your ingest pipeline with the following command:
+
+```console
+PUT _ingest/pipeline/logs-example-default
+{
+ "description": "Extracts fields and reroutes WARN",
+ "processors": [
+ {
+ "dissect": {
+ "field": "message",
+ "pattern": "%{@timestamp} %{log.level} %{host.ip} %{message}"
+ }
+ },
+ {
+ "reroute": {
+ "tag": "high_severity_logs",
+ "if" : "ctx.log?.level == 'WARN' || ctx.log?.level == 'ERROR'",
+ "dataset": "critical"
+ }
+ }
+ ]
+}
+```
+
+The previous command sets the following values for your reroute processor:
+
+- `tag`: Identifier for the processor that you can use for debugging and metrics. In the example, the tag is set to `high_severity_logs`.
+- `if`: Conditionally runs the processor. In the example, `ctx.log?.level == 'WARN' || ctx.log?.level == 'ERROR'` means the processor runs when the `log.level` field is `WARN` or `ERROR`.
+- `dataset`: The data stream dataset to route your document to if the previous condition is `true`. In the example, logs with a `log.level` of `WARN` or `ERROR` are routed to the `logs-critical-default` data stream.
+
+In addition to setting an ingest pipeline, you need to set an index template. Use the index template created in the Extract the `@timestamp` field section.
+
+### Add logs to a data stream
+
+Add the example logs to your data stream with this command:
+
+```console
+POST logs-example-default/_bulk
+{ "create": {} }
+{ "message": "2023-08-08T13:45:12.123Z WARN 192.168.1.101 Disk usage exceeds 90%." }
+{ "create": {} }
+{ "message": "2023-08-08T13:45:14.003Z ERROR 192.168.1.103 Database connection failed." }
+{ "create": {} }
+{ "message": "2023-08-08T13:45:15.004Z DEBUG 192.168.1.104 Debugging connection issue." }
+{ "create": {} }
+{ "message": "2023-08-08T13:45:16.005Z INFO 192.168.1.102 User changed profile picture." }
+```
+
+### Verify the reroute processor worked
+
+The reroute processor should route any logs with a `log.level` of `WARN` or `ERROR` to the `logs-critical-default` data stream. Query the data stream using the following command to verify the log data was routed as intended:
+
+```console
+GET logs-critical-default/_search
+```
+
+You should see results similar to the following, showing that the high-severity logs are now in the `critical` dataset:
+
+```json
+{
+ ...
+ "hits": {
+ ...
+ "hits": [
+ ...
+ "_source": {
+ "host": {
+ "ip": "192.168.1.101"
+ },
+ "@timestamp": "2023-08-08T13:45:12.123Z",
+ "message": "Disk usage exceeds 90%.",
+ "log": {
+ "level": "WARN"
+ },
+ "data_stream": {
+ "namespace": "default",
+ "type": "logs",
+ "dataset": "critical"
+ },
+ {
+ ...
+ "_source": {
+ "host": {
+ "ip": "192.168.1.103"
+ },
+ "@timestamp": "2023-08-08T13:45:14.003Z",
+ "message": "Database connection failed.",
+ "log": {
+ "level": "ERROR"
+ },
+ "data_stream": {
+ "namespace": "default",
+ "type": "logs",
+ "dataset": "critical"
+ }
+ }
+ }
+ ]
+ }
+}
+```
diff --git a/docs/en/serverless/logging/plaintext-application-logs.mdx b/docs/en/serverless/logging/plaintext-application-logs.mdx
new file mode 100644
index 0000000000..29ce023fc6
--- /dev/null
+++ b/docs/en/serverless/logging/plaintext-application-logs.mdx
@@ -0,0 +1,252 @@
+---
+id: serverlessObservabilityPlaintextApplicationLogs
+slug: /serverless/observability/plaintext-application-logs
+title: Plaintext application logs
+description: Parse and ingest raw, plain-text application logs using a log shipper like Filebeat.
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+import ApplicationLogsCorrelateLogs from '../transclusion/observability/application-logs/correlate-logs.mdx'
+import InstallWidget from '../transclusion/observability/tab-widgets/filebeat-install/widget.mdx'
+import SetupWidget from '../transclusion/observability/tab-widgets/filebeat-setup/widget.mdx'
+import StartWidget from '../transclusion/observability/tab-widgets/filebeat-start/widget.mdx'
+
+
+
+Ingest and parse plaintext logs, including existing logs, from any programming language or framework without modifying your application or its configuration.
+
+Plaintext logs require some additional setup that structured logs do not require:
+
+* To search, filter, and aggregate effectively, you need to parse plaintext logs using an ingest pipeline to extract structured fields. Parsing is based on log format, so you might have to maintain different settings for different applications.
+* To correlate plaintext logs, you need to inject IDs into log messages and parse them using an ingest pipeline.
+
+To ingest, parse, and correlate plaintext logs:
+
+1. Ingest plaintext logs with ((filebeat)) or ((agent)) and parse them before indexing with an ingest pipeline.
+1. Correlate plaintext logs with an ((apm-agent)).
+1. View logs in Logs Explorer.
+
+## Ingest logs
+
+Send application logs to your project using one of the following shipping tools:
+
+* **((filebeat)):** A lightweight data shipper that sends log data to your project.
+* **((agent)):** A single agent for logs, metrics, security data, and threat prevention. With Fleet, you can centrally manage ((agent)) policies and lifecycles directly from your project.
+
+### Ingest logs with ((filebeat))
+
+
+Use ((filebeat)) version 8.11+ for the best experience when ingesting logs with ((filebeat)).
+
+
+Follow these steps to ingest application logs with ((filebeat)).
+
+#### Step 1: Install ((filebeat))
+
+Install ((filebeat)) on the server you want to monitor by running the commands that align with your system:
+
+
+
+#### Step 2: Connect to your project
+
+Connect to your project using an API key to set up ((filebeat)). Set the following information in the `filebeat.yml` file:
+
+```yaml
+output.elasticsearch:
+ hosts: ["your-projects-elasticsearch-endpoint"]
+ api_key: "id:api_key"
+```
+
+1. Set the `hosts` to your project's ((es)) endpoint. Locate your project's endpoint by clicking the help icon () and selecting **Endpoints**. Add the **((es)) endpoint** to your configuration.
+1. From **Developer tools**, run the following command to create an API key that grants `manage` permissions for the `cluster` and the `filebeat-*` indices:
+
+ ```shell
+ POST /_security/api_key
+ {
+ "name": "your_api_key",
+ "role_descriptors": {
+ "filebeat_writer": {
+ "cluster": ["manage"],
+ "index": [
+ {
+ "names": ["filebeat-*"],
+ "privileges": ["manage", "create_doc"]
+ }
+ ]
+ }
+ }
+ }
+ ```
+
+ Refer to [Grant access using API keys](((filebeat-ref))/beats-api-keys.html) for more information.
+
+#### Step 3: Configure ((filebeat))
+
+Add the following configuration to the `filebeat.yml` file to start collecting log data.
+
+```yaml
+filebeat.inputs:
+- type: filestream [^1]
+ enabled: true
+  paths:
+    - /path/to/logs.log [^2]
+```
+[^1]: Reads lines from an active log file.
+[^2]: Paths that you want ((filebeat)) to crawl and fetch logs from.
+
+You can add additional settings to the `filebeat.yml` file to meet the needs of your specific setup. For example, the following settings would add a parser to manage messages that span multiple lines and add service fields:
+
+```yaml
+ parsers:
+ - multiline:
+ type: pattern
+ pattern: '^[0-9]{4}-[0-9]{2}-[0-9]{2}'
+ negate: true
+ match: after
+ fields_under_root: true
+ fields:
+ service.name: your_service_name
+ service.environment: your_service_environment
+ event.dataset: your_event_dataset
+```
+
+#### Step 4: Set up and start ((filebeat))
+
+From the ((filebeat)) installation directory, set the [index template](((ref))/index-templates.html) by running the command that aligns with your system:
+
+
+
+From the ((filebeat)) installation directory, start ((filebeat)) by running the command that aligns with your system:
+
+
+
+#### Step 5: Parse logs with an ingest pipeline
+
+Use an ingest pipeline to parse the contents of your logs into structured, [Elastic Common Schema (ECS)](((ecs-ref))/ecs-reference.html)-compatible fields.
+
+Create an ingest pipeline with a [dissect processor](((ref))/dissect-processor.html) to extract structured ECS fields from your log messages. In your project, go to **Developer Tools** and use a command similar to the following example:
+
+```shell
+PUT _ingest/pipeline/filebeat* [^1]
+{
+ "description": "Extracts the timestamp log level and host ip",
+ "processors": [
+ {
+ "dissect": { [^2]
+ "field": "message", [^3]
+ "pattern": "%{@timestamp} %{log.level} %{host.ip} %{message}" [^4]
+ }
+ }
+ ]
+}
+```
+[^1]: `_ingest/pipeline/filebeat*`: The name of the pipeline. Update the pipeline name to match the name of your data stream. For more information, refer to [Data stream naming scheme](((fleet-guide))/data-streams.html#data-streams-naming-scheme).
+[^2]: `processors.dissect`: Adds a [dissect processor](((ref))/dissect-processor.html) to extract structured fields from your log message.
+[^3]: `field`: The field you're extracting data from, `message` in this case.
+[^4]: `pattern`: The pattern of the elements in your log data. The pattern varies depending on your log format. `%{@timestamp}`, `%{log.level}`, `%{host.ip}`, and `%{message}` are common [ECS](((ecs-ref))/ecs-reference.html) fields. This pattern would match a log file in this format: `2023-11-07T09:39:01.012Z ERROR 192.168.1.110 Server hardware failure detected.`
+
+Refer to Extract structured fields for more on using ingest pipelines to parse your log data.
+
+After creating your pipeline, specify the pipeline for filebeat in the `filebeat.yml` file:
+
+```yaml
+output.elasticsearch:
+ hosts: ["your-projects-elasticsearch-endpoint"]
+ api_key: "id:api_key"
+ pipeline: "your-pipeline" [^1]
+```
+[^1]: Add the pipeline output and the name of your pipeline to the output.
+
+### Ingest logs with ((agent))
+
+Follow these steps to ingest and centrally manage your logs using ((agent)) and ((fleet)).
+
+#### Step 1: Add the custom logs integration to your project
+
+To add the custom logs integration to your project:
+
+1. In your ((observability)) project, go to **Project Settings** → **Integrations**.
+1. Type `custom` in the search bar and select **Custom Logs**.
+1. Click **Add Custom Logs**.
+1. Click **Install ((agent))** at the bottom of the page, and follow the instructions for your system to install the ((agent)).
+1. After installing the ((agent)), configure the integration from the **Add Custom Logs integration** page.
+1. Give your integration a meaningful name and description.
+1. Add the **Log file path**. For example, `/var/log/your-logs.log`.
+1. An agent policy is created that defines the data your ((agent)) collects. If you've previously installed an ((agent)) on the host you're collecting logs from, you can select the **Existing hosts** tab and use an existing agent policy.
+1. Click **Save and continue**.
+
+You can add additional settings to the integration under **Custom log file** by clicking **Advanced options** and adding YAML configurations to the **Custom configurations**. For example, the following settings would add a parser to manage messages that span multiple lines and add service fields. Service fields are used for Log correlation.
+
+```yaml
+ parsers:
+ - multiline:
+ type: pattern
+ pattern: '^[0-9]{4}-[0-9]{2}-[0-9]{2}'
+ negate: true
+ match: after
+ fields_under_root: true
+ fields:
+ service.name: your_service_name [^1]
+ service.version: your_service_version [^1]
+ service.environment: your_service_environment [^1]
+```
+[^1]: For log correlation, add the `service.name` (required), `service.version` (optional), and `service.environment` (optional) of the service you're collecting logs from.
+
+#### Step 2: Add an ingest pipeline to your integration
+
+To aggregate or search for information in plaintext logs, use an ingest pipeline with your integration to parse the contents of your logs into structured, [Elastic Common Schema (ECS)](((ecs-ref))/ecs-reference.html)-compatible fields.
+
+1. From the custom logs integration, select **Integration policies** tab.
+1. Select the integration policy you created in the previous section.
+1. Click **Change defaults** → **Advanced options**.
+1. Under **Ingest pipelines**, click **Add custom pipeline**.
+1. Create an ingest pipeline with a [dissect processor](((ref))/dissect-processor.html) to extract structured fields from your log messages.
+
+ Click **Import processors** and add a similar JSON to the following example:
+
+ ```JSON
+ {
+ "description": "Extracts the timestamp log level and host ip",
+ "processors": [
+ {
+ "dissect": { [^1]
+ "field": "message", [^2]
+ "pattern": "%{@timestamp} %{log.level} %{host.ip} %{message}" [^3]
+ }
+ }
+ ]
+ }
+ ```
+ [^1]: `processors.dissect`: Adds a [dissect processor](((ref))/dissect-processor.html) to extract structured fields from your log message.
+ [^2]: `field`: The field you're extracting data from, `message` in this case.
+ [^3]: `pattern`: The pattern of the elements in your log data. The pattern varies depending on your log format. `%{@timestamp}`, `%{log.level}`, `%{host.ip}`, and `%{message}` are common [ECS](((ecs-ref))/ecs-reference.html) fields. This pattern would match a log file in this format: `2023-11-07T09:39:01.012Z ERROR 192.168.1.110 Server hardware failure detected.`
+1. Click **Create pipeline**.
+1. Save and deploy your integration.
+
+## Correlate logs
+Correlate your application logs with trace events to:
+
+* view the context of a log and the parameters provided by a user
+* view all logs belonging to a particular trace
+* easily move between logs and traces when debugging application issues
+
+Log correlation works on two levels:
+
+- at the service level: annotations with `service.name`, `service.version`, and `service.environment` allow you to link logs with APM services
+- at the trace level: annotations with `trace.id` and `transaction.id` allow you to link logs with traces (see the example document after this list)
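+
+For example, once parsed, a correlated log event carries fields like the following. This is a sketch you could index from **Developer Tools**; the data stream name, service values, and IDs are placeholders:
+
+```console
+POST logs-myapp-default/_doc
+{
+  "@timestamp": "2023-08-08T13:45:12.123Z",
+  "message": "Disk usage exceeds 90%.",
+  "service": {
+    "name": "my-service",
+    "version": "1.0.0",
+    "environment": "production"
+  },
+  "trace": { "id": "4bf92f3577b34da6a3ce929d0e0e4736" },
+  "transaction": { "id": "00f067aa0ba902b7" }
+}
+```
+
+With `service.name` in place, the log can be tied to the corresponding APM service, while `trace.id` and `transaction.id` link it to individual traces and transactions.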
+
+Learn about correlating plaintext logs in the agent-specific ingestion guides:
+
+* [Go](((apm-go-ref))/logs.html)
+* [Java](((apm-java-ref))/logs.html#log-correlation-ids)
+* [.NET](((apm-dotnet-ref))/log-correlation.html)
+* [Node.js](((apm-node-ref))/log-correlation.html)
+* [Python](((apm-py-ref))/logs.html#log-correlation-ids)
+* [Ruby](((apm-ruby-ref))/log-correlation.html)
+
+## View logs
+
+To view logs ingested by ((filebeat)), go to **Discover**. Create a data view based on the `filebeat-*` index pattern. Refer to [Create a data view](((kibana-ref))/data-views.html) for more information.
+
+To view logs ingested by ((agent)), go to **Discover** and select the **Logs Explorer** tab. Refer to the Filter and aggregate logs documentation for more on viewing and filtering your log data.
\ No newline at end of file
diff --git a/docs/en/serverless/logging/run-log-pattern-analysis.mdx b/docs/en/serverless/logging/run-log-pattern-analysis.mdx
new file mode 100644
index 0000000000..55d4d93d75
--- /dev/null
+++ b/docs/en/serverless/logging/run-log-pattern-analysis.mdx
@@ -0,0 +1,39 @@
+---
+id: serverlessObservabilityRunLogPatternAnalysis
+slug: /serverless/observability/run-log-pattern-analysis
+title: Run a pattern analysis on log data
+description: Find patterns in unstructured log messages.
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+
+
+Log pattern analysis helps you find patterns in unstructured log messages and makes it easier to examine your data.
+When you run a pattern analysis, it performs categorization analysis on a selected field,
+creates categories based on the data, and then displays them together in a chart that shows the distribution of each category and an example document that matches the category.
+Log pattern analysis is useful when you want to examine how often different types of logs appear in your data set.
+It also helps you group logs in ways that go beyond what you can achieve with a terms aggregation.
+
+Log pattern analysis works on every text field.
+
+To run a log pattern analysis:
+
+1. In your ((observability)) project, go to **Discover** and select the **Logs Explorer** tab.
+
+1. Select an integration, for example **Elastic APM error_logs**, and apply any filters that you want.
+
+1. If you don't see any results, expand the time range, for example, to **Last 15 days**.
+
+1. In the **Available fields** list, select the text field you want to analyze, then click **Run pattern analysis**.
+
+
+
+ The results of the analysis are shown in a table:
+
+ ![Log pattern analysis of the message field ](../images/log-pattern-analysis.png)
+
+1. (Optional) Select one or more patterns, then choose to filter for (or filter out) documents that match the selected patterns.
+**Logs Explorer** only displays documents that match (or don't match) the selected patterns.
+The filter options enable you to remove unimportant messages and focus on the more important, actionable data during troubleshooting.
diff --git a/docs/en/serverless/logging/send-application-logs.mdx b/docs/en/serverless/logging/send-application-logs.mdx
new file mode 100644
index 0000000000..408249b8f5
--- /dev/null
+++ b/docs/en/serverless/logging/send-application-logs.mdx
@@ -0,0 +1,19 @@
+---
+id: serverlessObservabilitySendApplicationLogs
+slug: /serverless/observability/send-application-logs
+title: ((apm-agent)) log sending
+description: Use the Java ((apm-agent)) to capture and send logs.
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+import ApplicationLogsApmAgentLogSending from '../transclusion/observability/application-logs/apm-agent-log-sending.mdx'
+
+
+
+
+
+## Get started
+
+See the [Java agent](((apm-java-ref))/logs.html#log-sending) documentation to get started.
\ No newline at end of file
diff --git a/docs/en/serverless/logging/stream-log-files.mdx b/docs/en/serverless/logging/stream-log-files.mdx
new file mode 100644
index 0000000000..c82d5d6ded
--- /dev/null
+++ b/docs/en/serverless/logging/stream-log-files.mdx
@@ -0,0 +1,290 @@
+---
+id: serverlessObservabilityStreamLogFiles
+slug: /serverless/observability/stream-log-files
+title: Stream any log file
+description: Send a log file to your Observability project using the standalone ((agent)).
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+import DownloadWidget from '../transclusion/fleet/tab-widgets/download-widget.mdx'
+import RunStandaloneWidget from '../transclusion/fleet/tab-widgets/run-standalone-widget.mdx'
+import AgentLocationWidget from '../transclusion/observability/tab-widgets/logs/agent-location/widget.mdx'
+import StopWidget from '../transclusion/fleet/tab-widgets/stop-widget.mdx'
+import StartWidget from '../transclusion/fleet/tab-widgets/start-widget.mdx'
+import Roles from '../partials/roles.mdx'
+
+
+
+This guide shows you how to send a log file to your Observability project using a standalone ((agent)), configure the ((agent)) and your data streams using the `elastic-agent.yml` file, and query your logs using the data streams you've set up.
+
+The quickest way to get started is to:
+
+1. Open your Observability project. If you don't have one, .
+1. Go to **Add Data**.
+1. Under **Collect and analyze logs**, click **Stream log files**.
+
+This will kick off a set of guided instructions that walk you through configuring the standalone ((agent)) and sending log data to your project.
+
+To install and configure the ((agent)) manually, refer to Manually install and configure the standalone ((agent)).
+
+## Configure inputs and integration
+
+Enter a few configuration details in the guided instructions.
+
+{/* Do we want to include a screenshot or will it be too difficult to maintain? */}
+![Configure inputs and integration in the Stream log files guided instructions](../images/logs-stream-logs-config.png)
+
+**Configure inputs**
+
+* **Log file path**: The path to your log files.
+ You can also use a pattern like `/var/log/your-logs.log*`.
+ Click **Add row** to add more log file paths.
+
+ This will be passed to the `paths` field in the generated `elastic-agent.yml` file in a future step.
+
+
+* **Service name**: Provide a service name to allow distributed services running on
+  multiple hosts to correlate the related instances.
+
+{/* Advanced settings? */}
+
+**Configure integration**
+
+Elastic creates an integration to streamline connecting your log data to Elastic.
+
+* **Integration name**: Give your integration a name.
+ This is a unique identifier for your stream of log data that you can later use to filter data in Logs Explorer.
+ The value must be unique within your project, all lowercase, and max 100 chars. Special characters will be replaced with `_`.
+
+ This will be passed to the `streams.id` field in the generated `elastic-agent.yml` file in a future step.
+
+ The integration name will be used in Logs Explorer.
+ It will appear in the "All logs" dropdown menu.
+
+
+
+
+* **Dataset name**: Give your integration's dataset a name.
+  This becomes the name of your dataset data stream; name it anything that signifies the source of the data.
+ The value must be all lowercase and max 100 chars. Special characters will be replaced with `_`.
+
+ This will be passed to the `data_stream.dataset` field in the generated `elastic-agent.yml` file in a future step.
+
+## Install the ((agent))
+
+After configuring the inputs and integration, you'll continue in the guided instructions to
+install and configure the standalone ((agent)).
+
+Run the command under **Install the ((agent))** that corresponds with your system to download, extract, and install the ((agent)).
+Turning on **Automatically download the agent's config** includes your updated ((agent)) configuration file in the download.
+
+If you do not want to automatically download the configuration, click **Download config file** to download it manually and
+add it to `/opt/Elastic/Agent/elastic-agent.yml` on the host where you installed the ((agent)).
+The values you provided in Configure inputs and integration will be prepopulated in the generated configuration file.
+
+
+
+## Manually install and configure the standalone ((agent))
+
+If you're not using the guided instructions, follow these steps to manually install and configure the ((agent)).
+
+### Step 1: Download and extract the ((agent)) installation package
+
+On your host, download and extract the installation package that corresponds with your system:
+
+
+
+### Step 2: Install and start the ((agent))
+After downloading and extracting the installation package, you're ready to install the ((agent)).
+From the agent directory, run the install command that corresponds with your system:
+
+
+On macOS, Linux (tar package), and Windows, run the `install` command to
+install and start ((agent)) as a managed service. The DEB and RPM
+packages include a service unit for Linux systems with
+systemd. For these systems, you must enable and start the service.
+
+
+
+
+
+
+During installation, you'll be prompted with some questions:
+
+1. When asked if you want to install the agent as a service, enter `Y`.
+1. When asked if you want to enroll the agent in Fleet, enter `n`.
+
+### Step 3: Configure the ((agent))
+
+After your agent is installed, configure it by updating the `elastic-agent.yml` file.
+
+#### Locate your configuration file
+
+You'll find the `elastic-agent.yml` in one of the following locations according to your system:
+
+
+
+#### Update your configuration file
+
+Update the default configuration in the `elastic-agent.yml` file manually.
+It should look something like this:
+
+```yaml
+outputs:
+ default:
+ type: elasticsearch
+    hosts: 'your-projects-elasticsearch-endpoint:port'
+ api_key: 'your-api-key'
+inputs:
+ - id: your-log-id
+ type: filestream
+ streams:
+ - id: your-log-stream-id
+ data_stream:
+ dataset: example
+ paths:
+ - /var/log/your-logs.log
+```
+
+You need to set the values for the following fields:
+
+
+
+ `hosts`
+
+ Copy the ((es)) endpoint from your project's page and add the port (the default port is `443`). For example, `https://my-deployment.es.us-central1.gcp.cloud.es.io:443`.
+
+ If you're following the guided instructions in your project,
+ the ((es)) endpoint will be prepopulated in the configuration file.
+
+
+ If you need to find your project's ((es)) endpoint outside the guided instructions:
+
+ 1. Go to the **Projects** page that lists all your projects.
+ 1. Click **Manage** next to the project you want to connect to.
+ 1. Click **View** next to _Endpoints_.
+ 1. Copy the _Elasticsearch endpoint_.
+
+
+
+ ![Copy a project's Elasticsearch endpoint](../images/log-copy-es-endpoint.png)
+
+
+
+
+ `api-key`
+
+ Use an API key to grant the agent access to your project.
+  The API key format should be `id:api_key`.
+
+ If you're following the guided instructions in your project, an API key will be autogenerated
+ and will be prepopulated in the downloadable configuration file.
+
+
+
+ If configuring the ((agent)) manually, create an API key:
+
+ 1. Navigate to **Project settings** → **Management** → **API keys** and click **Create API key**.
+ 1. Select **Restrict privileges** and add the following JSON to give privileges for ingesting logs.
+ ```json
+ {
+ "standalone_agent": {
+ "cluster": [
+ "monitor"
+ ],
+ "indices": [
+ {
+ "names": [
+ "logs-*-*"
+ ],
+ "privileges": [
+ "auto_configure", "create_doc"
+ ]
+ }
+ ]
+ }
+ }
+ ```
+ 1. You _must_ set the API key to configure ((beats)).
+ Immediately after the API key is generated and while it is still being displayed, click the
+ **Encoded** button next to the API key and select **Beats** from the list in the tooltip.
+ Base64 encoded API keys are not currently supported in this configuration.
+
+ ![](../images/logs-stream-logs-api-key-beats.png)
+
+
+
+ `inputs.id`
+
+ A unique identifier for your input.
+
+
+
+ `type`
+
+ The type of input. For collecting logs, set this to `filestream`.
+
+
+
+ `streams.id`
+
+ A unique identifier for your stream of log data.
+
+ If you're following the guided instructions in your project, this will be prepopulated with
+ the value you specified in Configure inputs and integration.
+
+
+
+ `data_stream.dataset`
+
+ The name for your dataset data stream. Name this data stream anything that signifies the source of the data. In this configuration, the dataset is set to `example`. The default value is `generic`.
+
+ If you're following the guided instructions in your project, this will be prepopulated with
+ the value you specified in Configure inputs and integration.
+
+
+
+ `paths`
+
+ The path to your log files. You can also use a pattern like `/var/log/your-logs.log*`.
+
+ If you're following the guided instructions in your project, this will be prepopulated with
+ the value you specified in Configure inputs and integration.
+
+
+
+
+#### Restart the ((agent))
+
+After updating your configuration file, you need to restart the ((agent)).
+
+First, stop the ((agent)) and its related executables using the command that works with your system:
+
+
+
+
+
+Next, restart the ((agent)) using the command that works with your system:
+
+
+
+## Troubleshoot your ((agent)) configuration
+
+If you're not seeing your log files in your project, verify the following in the `elastic-agent.yml` file:
+
+- The path to your logs file under `paths` is correct.
+- Your API key is in `id:api_key` format. If not, your API key may be in an unsupported format, and you'll need to create an API key in **Beats** format.
+
+If you're still running into issues, refer to [((agent)) troubleshooting](((fleet-guide))/fleet-troubleshooting.html) and [Configure standalone Elastic Agents](((fleet-guide))/elastic-agent-configuration.html).
+
+## Next steps
+
+After you have your agent configured and are streaming log data to your project:
+
+- Refer to the Parse and organize logs documentation for information on extracting structured fields from your log data, rerouting your logs to different data streams, and filtering and aggregating your log data.
+- Refer to the Filter and aggregate logs documentation for information on filtering and aggregating your log data to find specific information, gain insight, and monitor your systems more efficiently.
diff --git a/docs/en/serverless/logging/troubleshoot-logs.mdx b/docs/en/serverless/logging/troubleshoot-logs.mdx
new file mode 100644
index 0000000000..c7aefbbb47
--- /dev/null
+++ b/docs/en/serverless/logging/troubleshoot-logs.mdx
@@ -0,0 +1,114 @@
+---
+id: serverlessObservabilityTroubleshootLogs
+slug: /serverless/observability/troubleshoot-logs
+title: Troubleshoot logs
+description: Find solutions to errors you might encounter while onboarding your logs.
+tags: [ 'serverless', 'observability', 'troubleshooting' ]
+---
+
+
+
+This section provides possible solutions for errors you might encounter while onboarding your logs.
+
+## User does not have permissions to create API key
+
+When adding new data using the guided instructions in your project (**Add data** → **Collect and analyze logs** → **Stream log files**),
+if you don't have the required privileges to create an API key, you'll see the following error message:
+
+>You need permission to manage API keys
+
+### Solution
+
+You need to either:
+
+* Ask an administrator to update your user role to at least **Deployment access** → **Admin**. Read more about user roles. After your user role is updated, restart the onboarding flow.
+* Get an API key from an administrator and manually add the API key to the ((agent)) configuration. See Configure the ((agent)) for more on manually updating the configuration and adding the API key.
+
+{/* Not sure if these are different in serverless... */}
+
+{/* ## Failed to create API key
+
+If you don't have the privileges to create `savedObjects` in a project, you'll see the following error message:
+
+```plaintext
+Failed to create API key
+
+Something went wrong: Unable to create observability-onboarding-state
+```
+
+### Solution
+
+You need an administrator to give you the `Saved Objects Management` ((kib)) privilege to generate the required `observability-onboarding-state` flow state.
+Once you have the necessary privileges, restart the onboarding flow. */}
+
+## Observability project not accessible from host
+
+If your Observability project is not accessible from the host, you'll see the following error message after pasting the **Install the ((agent))** instructions into the host:
+
+```plaintext
+Failed to connect to {host} port {port} after 0 ms: Connection refused
+```
+
+### Solution
+
+The host needs access to your project. Port `443` must be open and the project's ((es)) endpoint must be reachable. You can locate your project's endpoint by clicking the help icon () and selecting **Endpoints**. Run the following command, replacing the URL with your endpoint, and you should get an authentication error with more details on resolving your issue:
+
+```shell
+curl https://your-endpoint.elastic.cloud
+```
+
+## Download ((agent)) failed
+
+If the host was able to download the installation script but cannot connect to the public artifact repository, you'll see the following error message:
+
+```plaintext
+Download Elastic Agent
+
+Failed to download Elastic Agent, see script for error.
+```
+
+### Solutions
+
+* If the combination of the ((agent)) version and operating system architecture is not available, you'll see the following error message:
+
+ ```plaintext
+ The requested URL returned error: 404
+ ```
+
+ To fix this, update the ((agent)) version in the installation instructions to a known version of the ((agent)).
+
+* If the ((agent)) was fully downloaded previously, you'll see the following error message:
+
+ ```plaintext
+ Error: cannot perform installation as Elastic Agent is already running from this directory
+ ```
+
+ To fix this, delete previous downloads and restart the onboarding.
+
+* You're an Elastic Cloud Enterprise user without access to the Elastic downloads page.
+
+## Install ((agent)) failed
+
+If an ((agent)) already exists on your host, you'll see the following error message:
+
+```plaintext
+Install Elastic Agent
+
+Failed to install Elastic Agent, see script for error.
+```
+
+### Solution
+
+You can uninstall the current ((agent)) using the `elastic-agent uninstall` command, and run the script again.
+
+
+Uninstalling the current ((agent)) removes the entire current setup, including the existing configuration.
+
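+If, for example, the ((agent)) was installed system-wide on a Linux or macOS host (this depends on how you installed it), the uninstall command is typically run with elevated privileges:
+
+```shell
+sudo elastic-agent uninstall
+```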
+
+## Waiting for Logs to be shipped... step never completes
+
+If the **Waiting for Logs to be shipped...** step never completes, logs are not being shipped to your Observability project, and there is most likely an issue with your ((agent)) configuration.
+
+### Solution
+
+Inspect the ((agent)) logs for errors. See the [Debug standalone ((agent))s](((fleet-guide))/debug-standalone-agents.html#inspect-standalone-agent-logs) documentation for more on finding errors in ((agent)) logs.
diff --git a/docs/en/serverless/logging/view-and-monitor-logs.mdx b/docs/en/serverless/logging/view-and-monitor-logs.mdx
new file mode 100644
index 0000000000..0378428335
--- /dev/null
+++ b/docs/en/serverless/logging/view-and-monitor-logs.mdx
@@ -0,0 +1,81 @@
+---
+id: serverlessObservabilityDiscoverAndExploreLogs
+slug: /serverless/observability/discover-and-explore-logs
+title: Explore logs
+description: Visualize and analyze logs.
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+
+
+With **Logs Explorer**, based on Discover, you can quickly search and filter your log data, get information about the structure of log fields, and display your findings in a visualization.
+You can also customize and save your searches and place them on a dashboard.
+Instead of having to log into different servers, change directories, and view individual files, all your logs are available in a single view.
+
+Go to Logs Explorer by opening **Discover** from the navigation menu, and selecting the **Logs Explorer** tab.
+
+![Screen capture of the Logs Explorer](../images/log-explorer.png)
+
+## Required ((kib)) privileges
+
+Viewing data in Logs Explorer requires `read` privileges for **Discover** and **Integrations**.
+For more on assigning Kibana privileges, refer to the [((kib)) privileges](((kibana-ref))/kibana-privileges.html) docs.
+
+## Find your logs
+
+By default, Logs Explorer shows all of your logs.
+If you need to focus on logs from a specific integration, select the integration from the logs menu:
+
+
+
+Once you have the logs you want to focus on displayed, you can drill down further to find the information you need.
+For more on filtering your data in Logs Explorer, refer to Filter logs in Logs Explorer.
+
+## Review log data in the documents table
+
+The documents table in Logs Explorer functions similarly to the table in Discover.
+You can add fields, order table columns, sort fields, and update the row height in the same way you would in Discover.
+
+Refer to the [Discover](((kibana-ref))/discover.html) documentation for more information on updating the table.
+
+### Analyze data with smart fields
+
+Smart fields are dynamic fields that provide valuable insight on where your log documents come from, what information they contain, and how you can interact with them.
+The following sections detail the smart fields available in Logs Explorer.
+
+#### Resource smart field
+
+The resource smart field shows where your logs are coming from by displaying fields like `service.name`, `container.name`, `orchestrator.namespace`, `host.name`, and `cloud.instance.id`.
+Use this information to see where issues are coming from and if issues are coming from the same source.
+
+#### Content smart field
+
+The content smart field shows your logs' `log.level` and `message` fields.
+If neither of these fields is available, the content smart field will show the `error.message` or `event.original` field.
+Use this information to see your log content and inspect issues.
+
+#### Actions smart field
+
+The actions smart field provides access to additional information about your logs.
+
+**Expand:** () Open the log details to get an in-depth look at an individual log file.
+
+**Degraded document indicator:** () Shows if any of the document's fields were ignored when it was indexed.
+Ignored fields could indicate malformed fields or other issues with your document. Use this information to investigate and determine why fields are being ignored.
+
+**Stacktrace indicator:** () Shows if the document contains stack traces.
+This indicator makes it easier to navigate through your documents and know if they contain additional information in the form of stack traces.
+
+## View log details
+
+Click the expand icon () in the **Actions** column to get an in-depth look at an individual log file.
+
+These details provide immediate feedback and context for what's happening and where it's happening for each log.
+From here, you can quickly debug errors and investigate the services where errors have occurred.
+
+The following actions help you filter and focus on specific fields in the log details:
+
+* **Filter for value ():** Show logs that contain the specific field value.
+* **Filter out value ():** Show logs that do _not_ contain the specific field value.
+* **Filter for field present ():** Show logs that contain the specific field.
+* **Toggle column in table ():** Add or remove a column for the field to the main Logs Explorer table.
\ No newline at end of file
diff --git a/docs/en/serverless/observability-overview.mdx b/docs/en/serverless/observability-overview.mdx
new file mode 100644
index 0000000000..dc50701bf2
--- /dev/null
+++ b/docs/en/serverless/observability-overview.mdx
@@ -0,0 +1,137 @@
+---
+id: serverlessObservabilityOverview
+slug: /serverless/observability/serverless-observability-overview
+title: Observability overview
+description: Learn how to accelerate problem resolution with open, flexible, and unified observability powered by advanced machine learning and analytics.
+tags: [ 'serverless', 'observability', 'overview' ]
+---
+
+
+
+
+
+((observability)) provides granular insights and context into the behavior of applications running in your environments.
+It's an important part of any system that you build and want to monitor.
+Being able to detect and fix root cause events quickly within an observable system is a minimum requirement for any analyst.
+
+Elastic ((observability)) provides a single stack to unify your logs, metrics, and application traces.
+Ingest your data directly to your Observability project, where you can further process and enhance the data,
+before visualizing it and adding alerts.
+
+
+
+
+
+## Log monitoring
+
+Analyze log data from your hosts, services, Kubernetes, Apache, and many more.
+
+In **Logs Explorer** (powered by Discover), you can quickly search and filter your log data,
+get information about the structure of the fields, and display your findings in a visualization.
+
+![Logs Explorer showing log events](images/log-explorer-overview.png)
+
+Learn more about log monitoring →
+
+
+
+{/* RUM is not supported for this release. */}
+
+{/* Synthetic monitoring is not supported for this release. */}
+
+{/* Universal Profiling is not supported for this release. */}
+
+## Application performance monitoring (APM)
+
+Instrument your code and collect performance data and errors at runtime by installing APM agents like Java, Go, .NET, and many more.
+Then use ((observability)) to monitor your software services and applications in real time:
+
+* Visualize detailed performance information on your services.
+* Identify and analyze errors.
+* Monitor host-level and APM agent-specific metrics like JVM and Go runtime metrics.
+
+The **Service** inventory provides a quick, high-level overview of the health and general performance of all instrumented services.
+
+![Service inventory showing health and performance of instrumented services](images/services-inventory.png)
+
+Learn more about Application performance monitoring (APM) →
+
+
+
+## Infrastructure monitoring
+
+Monitor system and service metrics from your servers, Docker, Kubernetes, Prometheus, and other services and applications.
+
+The **Infrastructure** UI provides a couple of ways to view and analyze metrics across your infrastructure:
+
+The **Inventory** page provides a view of your infrastructure grouped by resource type.
+
+![((infrastructure-app)) in ((kib))](images/metrics-app.png)
+
+The **Hosts** page provides a dashboard-like view of your infrastructure and is backed by an easy-to-use interface called Lens.
+
+![Screenshot of the Hosts page](images/hosts.png)
+
+From either page, you can view health and performance metrics to get visibility into the overall health of your infrastructure.
+You can also drill down into details about a specific host, including performance metrics, host metadata, running processes,
+and logs.
+
+Learn more about infrastructure monitoring →
+
+## Synthetic monitoring
+
+Simulate actions and requests that an end user would perform on your site at predefined intervals and in a controlled environment.
+The end result is rich, consistent, and repeatable data that you can trend and alert on.
+
+For more information, see Synthetic monitoring.
+
+## Alerting
+
+Stay aware of potential issues in your environments with ((observability))’s alerting
+and actions feature that integrates with log monitoring and APM.
+It provides a set of built-in actions and specific threshold rules
+and enables central management of all rules.
+
+On the **Alerts** page, the **Alerts** table provides a snapshot of alerts occurring within the specified time frame. The table includes the alert status, when it was last updated, the reason for the alert, and more.
+
+![Summary of Alerts on the ((observability)) overview page](images/observability-alerts-overview.png)
+
+Learn more about alerting →
+
+## Service-level objectives (SLOs)
+
+Set clear, measurable targets for your service performance,
+based on factors like availability, response times, error rates, and other key metrics.
+Then monitor and track your SLOs in real time,
+using detailed dashboards and alerts that help you quickly identify and troubleshoot issues.
+
+From the SLO overview list, you can see all of your SLOs and a quick summary of what’s happening in each one:
+
+![Dashboard showing list of SLOs](images/slo-dashboard.png)
+
+Learn more about SLOs →
+
+## Cases
+
+Collect and share information about observability issues by creating cases.
+Cases allow you to track key investigation details,
+add assignees and tags to your cases, set their severity and status, and add alerts,
+comments, and visualizations. You can also send cases to third-party systems,
+such as ServiceNow and Jira.
+
+![Screenshot showing list of cases](images/cases.png)
+
+Learn more about cases →
+
+## AIOps
+
+Reduce the time and effort required to detect, understand, investigate, and resolve incidents at scale
+by leveraging predictive analytics and machine learning:
+
+* Detect anomalies by comparing real-time and historical data from different sources to look for unusual, problematic patterns.
+* Find and investigate the causes of unusual spikes or drops in log rates.
+* Detect distribution changes, trend changes, and other statistically significant change points in a metric of your time series data.
+
+![Log rate analysis page showing log rate spike ](images/log-rate-analysis.png)
+
+Learn more about AIOps →
diff --git a/docs/en/serverless/partials/apm-agent-warning.mdx b/docs/en/serverless/partials/apm-agent-warning.mdx
new file mode 100644
index 0000000000..49b4b09d1d
--- /dev/null
+++ b/docs/en/serverless/partials/apm-agent-warning.mdx
@@ -0,0 +1,3 @@
+
+ Not all APM agent configuration options are compatible with Elastic Cloud serverless.
+
\ No newline at end of file
diff --git a/docs/en/serverless/partials/feature-beta.mdx b/docs/en/serverless/partials/feature-beta.mdx
new file mode 100644
index 0000000000..3736786360
--- /dev/null
+++ b/docs/en/serverless/partials/feature-beta.mdx
@@ -0,0 +1,3 @@
+
+ The {props.feature} functionality is in beta and is subject to change. The design and code is less mature than official generally available features and is being provided as-is with no warranties.
+
\ No newline at end of file
diff --git a/docs/en/serverless/partials/roles.mdx b/docs/en/serverless/partials/roles.mdx
new file mode 100644
index 0000000000..06bda64b93
--- /dev/null
+++ b/docs/en/serverless/partials/roles.mdx
@@ -0,0 +1,3 @@
+
+ The **{props.role}** role or higher is required to {props.goal}. To learn more, refer to .
+
\ No newline at end of file
diff --git a/docs/en/serverless/projects/billing.mdx b/docs/en/serverless/projects/billing.mdx
new file mode 100644
index 0000000000..77c1993241
--- /dev/null
+++ b/docs/en/serverless/projects/billing.mdx
@@ -0,0 +1,23 @@
+---
+id: serverlessObservabilityBilling
+slug: /serverless/observability/observability-billing
+title: Observability billing dimensions
+description: Learn about how Observability usage affects pricing.
+tags: [ 'serverless', 'observability', 'overview' ]
+---
+
+
+
+Elastic Observability serverless projects provide you with all the capabilities of Elastic Observability to monitor critical applications.
+Projects are provided using a Software as a Service (SaaS) model, and pricing is entirely consumption-based.
+
+Your monthly bill is based on the capabilities you use.
+When you use Elastic Observability, your bill is calculated based on data volume, which has these components:
+
+* **Ingest** — Measured by the number of GB of log/event/info data that you send to your Observability project over the course of a month.
+* **Storage/Retention** — This is known as Search AI Lake.
+* In addition to the core ingest and retention dimensions, there is an optional charge to execute synthetic monitors on our testing infrastructure.
+Browser (journey) based tests are charged on a per-test-run basis,
+and Ping (lightweight) tests have an all-you-can-use model per location used.
+
+For detailed Observability serverless project rates, refer to the [Elastic Cloud pricing table](https://cloud.elastic.co/cloud-pricing-table?productType=serverless&project=observability).
diff --git a/docs/en/serverless/projects/create-an-observability-project.mdx b/docs/en/serverless/projects/create-an-observability-project.mdx
new file mode 100644
index 0000000000..1d95b532ef
--- /dev/null
+++ b/docs/en/serverless/projects/create-an-observability-project.mdx
@@ -0,0 +1,40 @@
+---
+id: serverlessObservabilityCreateAnObservabilityProject
+slug: /serverless/observability/create-an-observability-project
+title: Create an Elastic ((observability)) project
+description: Create a fully-managed Elastic ((observability)) project to monitor the health of your applications.
+tags: [ 'serverless', 'observability', 'how-to' ]
+---
+
+import Roles from '../partials/roles.mdx'
+
+
+
+
+
+import Roles from '../partials/roles.mdx'
+
+
+
+To create an SLO, in your ((observability)) project, go to **Observability** → **SLOs**:
+
+* If you're creating your first SLO, you'll see an introductory page. Click the **Create SLO** button.
+* If you've created SLOs before, click the **Create new SLO** button in the upper-right corner of the page.
+
+From here, complete the following steps:
+
+1. Define your service-level indicator (SLI).
+1. Set your objectives.
+1. Describe your SLO.
+
+
+
+## Define your SLI
+
+The type of SLI to use depends on the location of your data:
+
+* Custom KQL: Create an SLI based on raw logs coming from your services.
+* Timeslice metric: Create an SLI based on a custom equation that uses multiple aggregations.
+* Custom metric: Create an SLI to define custom equations from metric fields in your indices.
+* Histogram metric: Create an SLI based on histogram metrics.
+* APM latency and APM availability: Create an SLI based on services using application performance monitoring (APM).
+
+
+
+### Custom KQL
+
+Create an indicator based on any of your ((es)) indices or data views. You define two queries: one that yields the good events from your index, and one that yields the total events from your index.
+
+**Example:** You can define a custom KQL indicator based on the `service-logs` index with the **good query** defined as `nested.field.response.latency <= 100 and nested.field.env : "production"` and the **total query** defined as `nested.field.env : "production"`.
+
+When defining a custom KQL SLI, set the following fields:
+
+* **Index:** The data view or index pattern you want to base the SLI on. For example, `service-logs`.
+* **Timestamp field:** The timestamp field used by the index.
+* **Query filter:** A KQL filter to specify relevant criteria by which to filter the index documents.
+* **Good query:** The query yielding events that are considered good or successful. For example, `nested.field.response.latency <= 100 and nested.field.env : "production"`.
+* **Total query:** The query yielding all events to take into account for computing the SLI. For example, `nested.field.env : "production"`.
+* **Group by:** The field used to group the data based on the values of the specific field. For example, you could group by the `url.domain` field, which would create individual SLOs for each value of the selected field.
+
+
+
+### Custom metric
+
+Create an indicator to define custom equations from metric fields in your indices.
+
+**Example:** You can define **Good events** as the sum of the field `processor.processed` with a filter of `"processor.outcome: \"success\""`, and the **Total events** as the sum of `processor.processed` with a filter of `"processor.outcome: *"`.
+
+When defining a custom metric SLI, set the following fields:
+
+* **Source**
+ * **Index:** The data view or index pattern you want to base the SLI on. For example, `my-service-*`.
+ * **Timestamp field:** The timestamp field used by the index.
+ * **Query filter:** A KQL filter to specify relevant criteria by which to filter the index documents. For example, `'field.environment : "production" and service.name : "my-service"'`.
+* **Good events**
+ * **Metric [A-Z]:** The field that is aggregated using the `sum` aggregation for good events. For example, `processor.processed`.
+ * **Filter [A-Z]:** The filter to apply to the metric for good events. For example, `"processor.outcome: \"success\""`.
+ * **Equation:** The equation that calculates the good metric. For example, `A`.
+* **Total events**
+ * **Metric [A-Z]:** The field that is aggregated using the `sum` aggregation for total events. For example, `processor.processed`.
+ * **Filter [A-Z]:** The filter to apply to the metric for total events. For example, `"processor.outcome: *"`.
+ * **Equation:** The equation that calculates the total metric. For example, `A`.
+* **Group by:** The field used to group the data based on the values of the specific field. For example, you could group by the `url.domain` field, which would create individual SLOs for each value of the selected field.
+
+
+
+### Timeslice metric
+
+Create an indicator based on a custom equation that uses statistical aggregations and a threshold to determine whether a slice is good or bad.
+Supported aggregations include `Average`, `Max`, `Min`, `Sum`, `Cardinality`, `Last value`, `Std. deviation`, `Doc count`, and `Percentile`.
+The equation supports basic math and logic.
+
+
+ This indicator requires you to use the `Timeslices` budgeting method.
+
+
+**Example:** You can define an indicator to determine whether a Kubernetes StatefulSet is healthy.
+First you set the query filter to `orchestrator.cluster.name: "elastic-k8s" AND kubernetes.namespace: "my-ns" AND data_stream.dataset: "kubernetes.state_statefulset"`.
+Then you define an equation that compares the number of ready (healthy) replicas to the number of observed replicas:
+`A == B ? 1 : 0`, where `A` retrieves the last value of `kubernetes.statefulset.replicas.ready` and `B` retrieves the last value of `kubernetes.statefulset.replicas.observed`.
+The equation returns `1` if the condition `A == B` is true (indicating the same number of replicas) or `0` if it's false. If the value is less than 1, you can determine that the Kubernetes StatefulSet is unhealthy.
+
+When defining a timeslice metric SLI, set the following fields:
+
+* **Source**
+ * **Index:** The data view or index pattern you want to base the SLI on. For example, `metrics-*:metrics-*`.
+ * **Timestamp field:** The timestamp field used by the index.
+ * **Query filter:** A KQL filter to specify relevant criteria by which to filter the index documents. For example, `orchestrator.cluster.name: "elastic-k8s" AND kubernetes.namespace: "my-ns" AND data_stream.dataset: "kubernetes.state_statefulset"`.
+* **Metric definition**
+ * **Aggregation [A-Z]:** The type of aggregation to use.
+ * **Field [A-Z]:** The field to use in the aggregation. For example, `kubernetes.statefulset.replicas.ready`.
+ * **Filter [A-Z]:** The filter to apply to the metric.
+ * **Equation:** The equation that calculates the total metric. For example, `A == B ? 1 : 0`.
+ * **Comparator:** The type of comparison to perform.
+ * **Threshold:** The value to use along with the comparator to determine if the slice is good or bad.
+
+
+
+### Histogram metric
+
+Histograms record data in a compressed format and can record latency and delay metrics. You can create an SLI based on histogram metrics using a `range` aggregation or a `value_count` aggregation for both the good and total events. Filtering with KQL queries is supported on both event types.
+
+When using a `range` aggregation, both the `from` and `to` thresholds are required for the range and the events are the total number of events within that range. The range includes the `from` value and excludes the `to` value.
+
+**Example:** You can define your **Good events** using the `processor.latency` field with a filter of `"processor.outcome: \"success\""`, and your **Total events** using the `processor.latency` field with a filter of `"processor.outcome: *"`.
+
+When defining a histogram metric SLI, set the following fields:
+
+* **Source**
+ * **Index:** The data view or index pattern you want to base the SLI on. For example, `my-service-*`.
+ * **Timestamp field:** The timestamp field used by the index.
+ * **Query filter:** A KQL filter to specify relevant criteria by which to filter the index documents. For example, `field.environment : "production" and service.name : "my-service"`.
+* **Good events**
+ * **Aggregation:** The type of aggregation to use for good events, either **Value count** or **Range**.
+ * **Field:** The field used to aggregate events considered good or successful. For example, `processor.latency`.
+ * **From:** (`range` aggregation only) The starting value of the range for good events. For example, `0`.
+ * **To:** (`range` aggregation only) The ending value of the range for good events. For example, `100`.
+ * **KQL filter:** The filter for good events. For example, `"processor.outcome: \"success\""`.
+* **Total events**
+ * **Aggregation:** The type of aggregation to use for total events, either **Value count** or **Range**.
+ * **Field:** The field used to aggregate total events. For example, `processor.latency`.
+ * **From:** (`range` aggregation only) The starting value of the range for total events. For example, `0`.
+ * **To:** (`range` aggregation only) The ending value of the range for total events. For example, `100`.
+ * **KQL filter:** The filter for total events. For example, `"processor.outcome : *"`.
+* **Group by:** The field used to group the data based on the values of the specific field. For example, you could group by the `url.domain` field, which would create individual SLOs for each value of the selected field.
+
+
+
+### APM latency and APM availability
+
+There are two types of SLI you can create based on services using application performance monitoring (APM): APM latency and APM availability.
+
+Use **APM latency** to create an indicator based on the APM data that you received from your instrumented services and a latency threshold.
+
+**Example:** You can define an indicator on an APM service named `banking-service` for the `production` environment, and the transaction name `POST /deposit` with a latency threshold value of 300ms.
+
+Use **APM availability** to create an indicator based on the APM data received from your instrumented services.
+
+**Example:** You can define an indicator on an APM service named `search-service` for the `production` environment, and the transaction name `POST /search`.
+
+When defining either an APM latency or APM availability SLI, set the following fields:
+
+* **Service name:** The APM service name.
+* **Service environment:** Either `all` or the specific environment.
+* **Transaction type:** Either `all` or the specific transaction type.
+* **Transaction name:** Either `all` or the specific transaction name.
+* **Threshold (APM latency only):** The latency threshold in milliseconds (ms) to consider the request as good.
+* **Query filter:** An optional query filter on the APM data.
+
+
+
+## Set your objectives
+
+After defining your SLI, you need to set your objectives. To set your objectives, complete the following:
+
+1. Select your budgeting method
+1. Set your time window
+1. Set your target/SLO percentage
+
+
+
+### Set your time window and duration
+
+Select the durations over which you want to compute your SLO. You can select either a **rolling** or **calendar aligned** time window:
+
+| | |
+|---|---|
+| **Rolling** | Uses data from a specified duration that depends on when the SLO was created, for example the last 30 days. |
+| **Calendar aligned** | Uses data from a specified duration that aligns with the calendar, for example weekly or monthly. |
+
+
+
+### Select your budgeting method
+
+You can select either an **occurrences** or a **timeslices** budgeting method:
+
+| | |
+|---|---|
+| **Occurrences** | Uses the number of good events and the number of total events to compute the SLI. |
+| **Timeslices** | Breaks the overall time window into smaller slices of a defined duration, and uses the number of good slices over the number of total slices to compute the SLI. |
+
+
+
+### Set your target/SLO (%)
+
+The SLO target objective as a percentage.
+
+
+
+## Describe your SLO
+
+After setting your objectives, give your SLO a name, a short description, and add any relevant tags.
+
+
+
+## SLO burn rate alert rule
+
+When you use the UI to create an SLO, a default SLO burn rate alert rule is created automatically.
+The burn rate rule will use the default configuration and no connector.
+You must configure a connector if you want to receive alerts for SLO breaches.
+
+For more information about configuring the rule, see Create an SLO burn rate rule.
diff --git a/docs/en/serverless/slos/slos.mdx b/docs/en/serverless/slos/slos.mdx
new file mode 100644
index 0000000000..c5daf0dfbf
--- /dev/null
+++ b/docs/en/serverless/slos/slos.mdx
@@ -0,0 +1,91 @@
+---
+id: serverlessObservabilitySlos
+slug: /serverless/observability/slos
+title: SLOs
+description: Set clear, measurable targets for your service performance with service-level objectives (SLOs).
+tags: [ 'serverless', 'observability', 'overview' ]
+---
+
+
+
+Service-level objectives (SLOs) allow you to set clear, measurable targets for your service performance, based on factors like availability, response times, error rates, and other key metrics.
+You can define SLOs based on different types of data sources, such as custom KQL queries and APM latency or availability data.
+
+Once you've defined your SLOs, you can monitor them in real time, with detailed dashboards and alerts that help you quickly identify and troubleshoot any issues that may arise.
+You can also track your progress against your SLO targets over time, with a clear view of your error budgets and burn rates.
+
+
+
+## Important concepts
+The following table lists some important concepts related to SLOs:
+
+| | |
+|---|---|
+| **Service-level indicator (SLI)** | The measurement of your service's performance, such as service latency or availability. |
+| **SLO** | The target you set for your SLI. It specifies the level of performance you expect from your service over a period of time. |
+| **Error budget** | The amount of time that your SLI can fail to meet the SLO target before it violates your SLO. |
+| **Burn rate** | The rate at which your service consumes your error budget. |
+
+
+
+## SLO overview
+
+From the SLO overview, you can see all of your SLOs and a quick summary of what's happening in each one:
+
+![Dashboard showing list of SLOs](../images/slo-dashboard.png)
+
+Select an SLO from the overview to see additional details including:
+
+* **Burn rate:** the percentage of bad events over different time periods (1h, 6h, 24h, 72h) and the risk of exhausting your error budget within those time periods.
+* **Historical SLI:** the SLI value and how it's trending over the SLO time window.
+* **Error budget burn down:** the remaining error budget and how it's trending over the SLO time window.
+* **Alerts:** active alerts if you've set any SLO burn rate alert rules for the SLO.
+
+![Detailed view of a single SLO](../images/slo-detailed-view.png)
+
+
+
+## Search and filter SLOs
+
+You can apply searches and filters to quickly find the SLOs you're interested in.
+
+![Options for filtering SLOs in the overview](../images/slo-filtering-options.png)
+
+* **Apply structured filters:** Next to the search field, click the **Add filter** icon to add a custom filter. Notice that you can use `OR` and `AND` to combine filters. The structured filter can be disabled, inverted, or pinned across all apps.
+* **Enter a semi-structured search:** In the search field, start typing a field name to get suggestions for field names and operators that you can use to build a structured query. The semi-structured search will filter SLOs for matches, and only return matching SLOs.
+* Use the **Status** and **Tags** menus to include or exclude SLOs from the view based on the status or defined tags.
+
+There are also options to sort and group the SLOs displayed in the overview:
+
+![SLOs sorted by SLO status and grouped by tags](../images/slo-group-by.png)
+
+* **Sort by**: SLI value, SLO status, Error budget consumed, or Error budget remaining.
+* **Group by**: None, Tags, Status, or SLI type.
+* Click icons to switch between a card view (), list view (), or compact view ().
+
+## SLO dashboard panels
+
+SLO data is also available as Dashboard _panels_.
+Panels allow you to curate custom data views and visualizations to bring clarity to your data.
+
+Available SLO panels include:
+
+* **SLO Overview**: Visualize a selected SLO's health, including name, current SLI value, target, and status.
+* **SLO Alerts**: Visualize one or more SLO alerts, including status, rule name, duration, and reason. In addition, configure and update alerts, or create cases directly from the panel.
+
+![Detailed view of an SLO dashboard panel](../images/slo-dashboard-panel.png)
+
+To learn more about Dashboards, see Dashboards.
+
+
+
+## Next steps
+
+Get started using SLOs to measure your service performance:
+
+{/* TODO: Find out if any special privileges are required to grant access to SLOs and document as required. Classic doclink was Configure SLO access */}
+
+*
+*
+*
+*
diff --git a/docs/en/serverless/synthetics/synthetics-analyze.mdx b/docs/en/serverless/synthetics/synthetics-analyze.mdx
new file mode 100644
index 0000000000..0ca1857e02
--- /dev/null
+++ b/docs/en/serverless/synthetics/synthetics-analyze.mdx
@@ -0,0 +1,373 @@
+---
+id: serverlessObservabilitySyntheticsAnalyze
+slug: /serverless/observability/synthetics-analyze
+title: Analyze data from synthetic monitors
+# description: Description to be written
+tags: []
+---
+
+
+
+
+
+The Synthetics UI in Observability projects both provides a high-level overview of your service's
+availability and allows you to dig into details to diagnose what caused downtime.
+
+
+
+## Overview
+
+The Synthetics **Overview** tab provides you with a high-level view of all the services you are monitoring
+to help you quickly diagnose outages and other connectivity issues within your network.
+
+To access this page in your Observability project, go to **Synthetics** → **Overview**.
+
+This overview includes a snapshot of the current status of all monitors, the number of errors that
+occurred over the last 6 hours, and the number of alerts over the last 12 hours.
+All monitors created using a Synthetics project or using the UI will be listed below with information
+about the location, current status, and duration average.
+
+
+
+When you use a single monitor configuration to create monitors in multiple locations, each location
+is listed as a separate monitor because they run as individual monitors, and the status and duration average
+can vary by location.
+
+
+
+![Synthetics UI in an Observability project](../images/synthetics-monitor-page.png)
+
+To get started with your analysis in the Overview tab, you can search for monitors or
+use the filter options including current status (up, down, or disabled),
+monitor type (for example, journey or HTTP), location, and more.
+
+Then click an individual monitor to see some details in a flyout.
+From there, you can click **Go to monitor** to go to an individual monitor's page
+to see more details (as described below).
+
+
+
+## All monitor types
+
+When you go to an individual monitor's page, you'll see much more detail about the monitor's
+performance over time. The details vary by monitor type, but for every monitor at the top of the
+page you'll see:
+
+* The monitor's **name** with a down arrow icon that you can use to quickly move between monitors.
+* The **location** of the monitor. If the same monitor configuration was used to create monitors in
+ multiple locations, you'll also see a down arrow icon that you can use to quickly move between
+ locations that use the same configuration.
+
+* The latest **status** and when the monitor was **last run**.
+* The **Run test manually** button that allows you to run the test on
+ demand before the next scheduled run.
+
+
+
+ This is only available for monitors running on Elastic's global managed testing infrastructure.
+ It is not available for monitors running on ((private-location))s.
+
+
+
+* The **Edit monitor** button that allows you to edit the monitor's
+ configuration.
+
+![Header at the top of the individual monitor page for all monitor types in the Synthetics UI](../images/synthetics-analyze-individual-monitor-header.png)
+
+Each individual monitor's page has three tabs: Overview, History, and Errors.
+
+
+
+### Overview
+
+The **Overview** tab has information about the monitor availability, duration, and any errors
+that have occurred since the monitor was created.
+The _Duration trends_ chart displays the timing for each check that was performed in the last 30 days.
+This visualization helps you to gain insights into how quickly requests are resolved by the targeted endpoint
+and gives you a sense of how frequently a host or endpoint was down.
+
+![Details in the Overview tab on the individual monitor page for all monitor types in the Synthetics UI](../images/synthetics-analyze-individual-monitor-details.png)
+
+
+
+### History
+
+The **History** tab has information on every time the monitor has run.
+It includes some high-level stats and a complete list of all test runs.
+Use the calendar icon () and search bar
+to filter for runs that occurred in a specific time period.
+
+{/* What you might do with this info */}
+{/* ... */}
+
+For browser monitors, you can click on any run in the **Test runs** list
+to see the details for that run. Read more about what information is
+included in the Details for one run section below.
+
+![The History tab on the individual monitor page for all monitor types in the Synthetics UI](../images/synthetics-analyze-individual-monitor-history.png)
+
+If the monitor is configured to retest on failure,
+you'll see retests listed in the **Test runs** table. Runs that are retests include a
+rerun icon () next to the result badge.
+
+![A failed run and a retest in the table of test runs in the Synthetics UI](../images/synthetics-retest.png)
+
+
+
+### Errors
+
+The **Errors** tab has information on failed runs.
+If the monitor is configured to retest on failure,
+failed runs will only result in an error if both the initial run and the rerun fail.
+This can reduce noise related to transient problems.
+
+The Errors tab includes a high-level overview of all alerts and a complete list of all failures.
+Use the calendar icon () and search bar
+to filter for runs that occurred in a specific time period.
+
+{/* What you might do with this info */}
+{/* ... */}
+
+For browser monitors, you can click on any run in the **Error** list
+to open an **Error details** page that includes most of the same information
+that is included in the Details for one run section below.
+
+![The Errors tab on the individual monitor page for all monitor types in the Synthetics UI](../images/synthetics-analyze-individual-monitor-errors.png)
+
+
+
+## Browser monitors
+
+For browser monitors, you can look at results at various levels of granularity:
+
+* See an overview of journey runs over time.
+* Drill down into the details of a single run.
+* Drill down further into the details of a single _step_ within a journey.
+
+
+
+### Journey runs over time
+
+The journey page on the Overview tab includes:
+
+* An overview of the **last test run** including high-level information for each step.
+* **Alerts** to date including both active and recovered alerts.
+* **Duration by step** over the last 24 hours.
+* A list of the **last 10 test runs** that link to the details for each run.
+
+![Individual journey page for browser monitors in the Synthetics UI](../images/synthetics-analyze-journeys-over-time.png)
+
+From here, you can either drill down into:
+
+* The latest run of the full journey by clicking **View test run**
+ or a past run in the list of **Last 10 test runs**.
+ This will take you to the view described below in Details for one run.
+
+* An individual step in this run by clicking the performance breakdown icon
+ () next to one of the steps.
+ This will take you to the view described below in Details for one step.
+
+
+
+### Details for one run
+
+The page detailing one run for a journey includes more information on each step in the current run
+and opportunities to compare each step to the same step in previous runs.
+
+{/* What info it includes */}
+At the top of the page, see the _Code executed_ and any _Console_ output for each step.
+If the step failed, this will also include a _Stacktrace_ tab that you can use to
+diagnose the cause of errors.
+
+Navigate through each step using **Previous** and
+**Next**.
+
+{/* Screenshot of the viz */}
+![Step carousel on a page detailing one run of a browser monitor in the Synthetics UI](../images/synthetics-analyze-one-run-code-executed.png)
+
+{/* What info it includes */}
+Scroll down to dig into the steps in this journey run.
+Click the icon next to the step number to show details.
+The details include metrics for the step in the current run and the step in the last successful run.
+Read more about step-level metrics below in Timing and
+Metrics.
+
+{/* What you might do with this info */}
+This is particularly useful to compare the metrics for a failed step to the last time it completed successfully
+when trying to diagnose the reason it failed.
+
+{/* Screenshot of the viz */}
+![Step list on a page detailing one run of a browser monitor in the Synthetics UI](../images/synthetics-analyze-one-run-compare-steps.png)
+
+Drill down to see even more details for an individual step by clicking the performance breakdown icon
+() next to one of the steps.
+This will take you to the view described below in Details for one step.
+
+
+
+### Details for one step
+
+After clicking the performance breakdown icon ()
+you'll see more detail for an individual step.
+
+
+
+#### Screenshot
+
+{/* What info it includes */}
+By default, the synthetics library will capture a screenshot for each step regardless of
+whether the step completed or failed.
+
+
+
+Customize screenshot behavior for all monitors in the configuration file,
+for one monitor using `monitor.use`, or for a run using
+the CLI.
+
+
+
+{/* What you might do with this info */}
+Screenshots can be particularly helpful to identify what went wrong when a step fails because of a change to the UI.
+You can compare the failed step to the last time the step successfully completed.
+
+{/* Screenshot of the viz */}
+![Screenshot for one step in a browser monitor in the Synthetics UI](../images/synthetics-analyze-one-step-screenshot.png)
+
+
+
+#### Timing
+
+The **Timing** visualization shows a breakdown of the time spent in each part of
+the resource loading process for the step including:
+
+* **Blocked**: The request was initiated but is blocked or queued.
+* **DNS**: The DNS lookup to convert the hostname to an IP Address.
+* **Connect**: The time it took the request to connect to the server.
+ Lengthy connections could indicate network issues, connection errors, or an overloaded server.
+
+* **TLS**: If your page is loading resources securely over TLS, this is the time it took to set up that connection.
+* **Wait**: The time it took for the response generated by the server to be received by the browser.
+ A lengthy Waiting (TTFB) time could indicate server-side issues.
+
+* **Receive**: The time it took to receive the response from the server,
+ which can be impacted by the size of the response.
+
+* **Send**: The time spent sending the request data to the server.
+
+Next to each network timing metric, there's an icon that indicates whether the value is
+higher (),
+lower (),
+or the same ()
+compared to the median of all runs in the last 24 hours.
+Hover over the icon to see more details in a tooltip.
+
+{/* What you might do with this info */}
+This gives you an overview of how much time is spent (and how that time is spent) loading resources.
+This high-level information may not help you diagnose a problem on its own, but it could act as a
+signal to look at more granular information in the Network requests section.
+
+{/* Screenshot of the viz */}
+![Network timing visualization for one step in a browser monitor in the Synthetics UI](../images/synthetics-analyze-one-step-timing.png)
+
+
+
+#### Metrics
+
+{/* What info it includes */}
+The **Metrics** visualization gives you insight into the performance of the web page visited in
+the step and what a user would experience when going through the current step.
+Metrics include:
+
+* **First contentful paint (FCP)** focuses on the initial rendering and measures the time from
+ when the page starts loading to when any part of the page's content is displayed on the screen.
+
+* **Largest contentful paint (LCP)** measures loading performance. To provide a good user experience,
+ LCP should occur within 2.5 seconds of when the page first starts loading.
+
+* **Cumulative layout shift (CLS)** measures visual stability. To provide a good user experience,
+ pages should maintain a CLS of less than 0.1.
+
+* **`DOMContentLoaded` event (DCL)** is triggered when the browser completes parsing the document.
+ Helpful when there are multiple listeners, or logic is executed:
+ `domContentLoadedEventEnd - domContentLoadedEventStart`.
+
+* **Transfer size** represents the size of the fetched resource. The size includes the response header
+ fields plus the response payload body.
+
+
+
+Largest contentful paint and Cumulative layout shift are part of Google's
+[Core Web Vitals](https://web.dev/vitals/), an initiative that introduces a set of metrics
+that help categorize good and bad sites by quantifying the real-world user experience.
+
+
+
+Next to each metric, there's an icon that indicates whether the value is
+higher (),
+lower (),
+or the same ()
+compared to all runs over the last 24 hours.
+Hover over the icon to see more details in a tooltip.
+
+{/* Screenshot of the viz */}
+![Metrics visualization for one step in a browser monitor in the Synthetics UI](../images/synthetics-analyze-one-step-metrics.png)
+
+
+
+#### Object weight and count
+
+{/* What info it includes */}
+The **Object weight** visualization shows the cumulative size of downloaded resources by type,
+and **Object count** shows the number of individual resources by type.
+
+{/* What you might do with this info */}
+This provides a different kind of analysis.
+For example, you might have a large number of JavaScript files,
+each of which will need a separate download, but they may be collectively small.
+This could help you identify an opportunity to improve efficiency by combining multiple files into one.
+
+{/* Screenshot of the viz */}
+![Object visualization for one step in a browser monitor in the Synthetics UI](../images/synthetics-analyze-one-step-object.png)
+
+
+
+#### Network requests
+
+{/* What info it includes */}
+The **Network requests** visualization is a waterfall chart that shows every request
+the page made when a user executed it.
+Each line in the chart represents an HTTP network request and helps you quickly identify
+what resources are taking the longest to load and in what order they are loading.
+
+The colored bars within each line indicate the time spent per resource.
+Each color represents a different part of that resource's loading process
+(as defined in the Timing section above) and
+includes the time spent downloading content for specific
+Multipurpose Internet Mail Extensions (MIME) types:
+HTML, JS, CSS, Media, Font, XHR, and Other.
+
+Understanding each phase of a request can help you improve your site's speed by
+reducing the time spent in each phase.
+
+{/* Screenshot of the viz */}
+![Network requests waterfall visualization for one step in a browser monitor in the Synthetics UI](../images/synthetics-analyze-one-step-network.png)
+
+Without leaving the waterfall chart, you can view data points relating to each resource:
+resource details, request headers, response headers, and certificate headers.
+On the waterfall chart, select a resource name, or any part of each row,
+to display the resource details overlay.
+
+For additional analysis, whether to check the content of a CSS file or to view a specific image,
+click the icon located beside each resource
+to view its content in a new tab.
+
+You can also navigate between steps and checks at the top of the page to
+view the corresponding waterfall charts.
+
+{/* [discrete] */}
+{/* */}
+{/* = Anomalies */}
+
+{/* [discrete] */}
+{/* */}
+{/* = Alerts */}
diff --git a/docs/en/serverless/synthetics/synthetics-command-reference.mdx b/docs/en/serverless/synthetics/synthetics-command-reference.mdx
new file mode 100644
index 0000000000..83950f2b79
--- /dev/null
+++ b/docs/en/serverless/synthetics/synthetics-command-reference.mdx
@@ -0,0 +1,357 @@
+---
+id: serverlessObservabilitySyntheticsCommandReference
+slug: /serverless/observability/synthetics-command-reference
+title: Use the Synthetics CLI
+# description: Description to be written
+tags: []
+---
+
+
+
+
+
+
+
+## `@elastic/synthetics`
+
+Elastic uses the [@elastic/synthetics](https://www.npmjs.com/package/@elastic/synthetics)
+library to run synthetic browser tests and report the test results.
+The library also provides a CLI to help you scaffold, develop/run tests locally, and push tests to Elastic.
+
+```sh
+npx @elastic/synthetics [options] [files] [dir]
+```
+
+You will not need to use most command line flags.
+However, there are some you may find useful:
+
+
+ `--match `
+
+ Run tests with a name or tags that match the given glob pattern.
+
+
+ `--tags Array`
+
+ Run tests with the given tags that match the given glob pattern.
+
+
+ `--pattern `
+
+ RegExp pattern to match journey files in the current working directory. Defaults
+ to `/*.journey.(ts|js)$/`, which matches files ending with `.journey.ts` or `.journey.js`.
+
+
+ `--params `
+
+ JSON object that defines any variables your tests require.
+ Read more in Work with params and secrets.
+
+ Params passed will be merged with params defined in your
+ `synthetics.config.js` file.
+ Params defined via the CLI take precedence.
+
+
+ `--playwright-options `
+
+ JSON object to pass in custom Playwright options for the agent.
+ For more details on relevant Playwright options, refer to the
+ configuration docs.
+
+ Options passed will be merged with Playwright options defined in your
+ `synthetics.config.js` file.
+ Options defined via the CLI take precedence.
+
+
+ `--screenshots `
+
+ Control whether or not to capture screenshots at the end of each step.
+ Options include `'on'`, `'off'`, or `'only-on-failure'`.
+
+ This can also be set in the configuration file using
+ `monitor.screenshot`.
+ The value defined via the CLI will take precedence.
+
+
+ `-c, --config `
+
+ Path to the configuration file. By default, the test runner looks for a
+ `synthetics.config.(js|ts)` file in the current directory. Synthetics
+ configuration provides options to configure how your tests are run and pushed to
+ Elastic. Allowed options are described in the configuration docs.
+
+
+ `--reporter `
+
+ One of `json`, `junit`, `buildkite-cli`, or `default`. Use the JUnit or Buildkite
+ reporter to provide easily parsed output to CI systems.
+
+
+ `--inline`
+
+ Instead of reading from a file, `cat` inline scripted journeys and pipe them through `stdin`.
+ For example, `cat path/to/file.js | npx @elastic/synthetics --inline`.
+
+
+ `--no-throttling`
+
+ Does not apply throttling.
+
+ Throttling can also be disabled in the configuration file using
+ `monitor.throttling`.
+ The value defined via the CLI will take precedence.
+
+
+ Network throttling for browser-based monitors is disabled.
+ See this [documentation](https://github.com/elastic/synthetics/blob/main/docs/throttling.md) for more details.
+
+
+
+ `--no-headless`
+
+ Runs with the browser in headful mode.
+
+ This is the same as setting [Playwright's `headless` option](https://playwright.dev/docs/api/class-testoptions#test-options-headless) to `false` by running `--playwright-options '{"headless": false}'`.
+
+
+ Headful mode should only be used locally to see the browser and interact with DOM elements directly for testing purposes. Do not attempt to run in headful mode when running through Elastic's global managed testing infrastructure or ((private-location))s as this is not supported.
+
+
+
+ `-h, --help`
+
+ Shows help for the `npx @elastic/synthetics` command.
+
+
+
+
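+For example, you might combine several of the flags described above when running tests locally. In this sketch, the directory, glob pattern, param name, and values are illustrative placeholders:
+
+```sh
+npx @elastic/synthetics . \
+  --match "checkout*" \
+  --params '{"url": "https://example.com"}' \
+  --reporter junit
+```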
+
+ The `--pattern`, `--tags`, and `--match` flags for filtering are only supported when you
+ run synthetic tests locally or push them to Elastic. Filtering is _not_ supported in any other subcommands
+ like `init` and `locations`.
+
+
+
+ For debugging synthetic tests locally, you can set an environment variable,
+ `DEBUG=synthetics npx @elastic/synthetics`, to capture Synthetics agent logs.
+
+
+
+
+## `@elastic/synthetics init`
+
+Scaffold a new Synthetics project using Elastic Synthetics.
+
+This will create a template Node.js project that includes the synthetics agent, required dependencies,
+a synthetics configuration file, and example browser and lightweight monitor files.
+These files can be edited and then pushed to Elastic to create monitors.
+
+```sh
+npx @elastic/synthetics init
+```
+
+Read more about what's included in a template Synthetics project in Create a Synthetics project.
+
+
+
+## `@elastic/synthetics push`
+
+Create monitors in your Observability project by using your local journeys. By default, running
+the `push` command will use the `project` settings field from the `synthetics.config.ts`
+file, which is set up using the `init` command. However, you can override these
+settings using the CLI flags.
+
+```sh
+SYNTHETICS_API_KEY=<api-key> npx @elastic/synthetics push --url <url> --id <project-id>
+```
+
+
+ The `push` command includes interactive prompts to prevent you from accidentally deleting or duplicating monitors.
+ You will see a prompt when:
+
+ * You `push` a project that used to contain one or more monitors but no longer contains any monitors.
+ Select `yes` to delete the monitors associated with the project ID being pushed.
+ * You `push` a Synthetics project that's already been pushed using one Synthetics project ID and then try to `push`
+ it using a _different_ ID.
+ Select `yes` to create duplicates of all monitors in the project.
+ You can set the `DEBUG=synthetics` environment variable to capture the deleted monitors.
+
+
+
+ If the journey contains external NPM packages other than `@elastic/synthetics`,
+ those packages will be bundled along with the journey code when the `push` command is invoked.
+ However, there are some limitations when using external packages:
+
+ * Bundled journeys after compression should not be more than 1500 kilobytes.
+ * Native node modules will not work as expected due to platform inconsistency.
+ * Uploading files in journey scripts (via `locator.setInputFiles`) is not supported.
+
+
+
+ `--auth `
+
+ API key used for authentication. You can also set the API key via the `SYNTHETICS_API_KEY` environment variable.
+
+ To create an API key, you must be logged in as a user with
+ Editor access.
+
+
+ `--id `
+
+ A unique id associated with your Synthetics project.
+ It will be used for logically grouping monitors.
+
+ If you used `init` to create a Synthetics project, this is the `` you specified.
+
+ This can also be set in the configuration file using
+ `project.id`.
+ The value defined via the CLI will take precedence.
+
+
+ `--url `
+
+ The URL for the Observability project to which you want to upload the monitors.
+
+ This can also be set in the configuration file using
+ `project.url`.
+ The value defined via the CLI will take precedence.
+
+
+ `--schedule `
+
+ The interval (in minutes) at which the monitor should run.
+
+ This can also be set in the configuration file using
+ `monitor.schedule`.
+ The value defined via the CLI will take precedence.
+
+
+ [`--locations Array`](https://github.com/elastic/synthetics/blob/((synthetics_version))/src/locations/public-locations.ts#L28-L37)
+
+ Where to deploy the monitor. Monitors can be deployed in multiple locations so that you can detect differences in availability and response times across those locations.
+
+ To list available locations, refer to `@elastic/synthetics locations`.
+
+ This can also be set in the configuration file using
+ `monitor.locations`.
+ The value defined via the CLI will take precedence.
+
+
+ `--private-locations Array`
+
+ The ((private-location))s to which the monitors will be deployed. These ((private-location))s refer to locations hosted and managed by you, whereas
+ `locations` are hosted by Elastic. You can specify a ((private-location)) using the location's name.
+
+ To list available ((private-location))s, refer to `@elastic/synthetics locations`.
+
+ This can also be set in the configuration file using
+ `monitor.privateLocations`.
+ The value defined via the CLI will take precedence.
+
+
+ `--yes`
+
+ The `push` command includes interactive prompts to prevent you from accidentally deleting or duplicating monitors.
+ If running the CLI non-interactively, you can override these prompts using the `--yes` option.
+ When the `--yes` option is passed to `push`:
+
+ * If you `push` a Synthetics project that used to contain one or more monitors but no longer contains any monitors,
+ all monitors associated with the Synthetics project ID being pushed will be deleted.
+
+ * If you `push` a Synthetics project that's already been pushed using one Synthetics project ID and then try to `push`
+ it using a _different_ ID, it will create duplicates of all monitors in the Synthetics project.
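+
+ For example, a non-interactive push from a CI job might look like the following sketch (the URL is a placeholder for your Observability project URL):
+
+ ```sh
+ npx @elastic/synthetics push \
+   --auth "$SYNTHETICS_API_KEY" \
+   --id my-synthetics-project \
+   --url https://my-observability-project.example.com \
+   --yes
+ ```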
+
+
+
+
+## Tag monitors
+
+You can add one or more tags to a synthetics journey. Use tags to
+filter journeys when running tests locally or pushing them to Elastic.
+
+To add tags to a single journey, add the `tags` parameter to the `journey` function or
+use the `monitor.use` method.
+
+```js
+import {journey, monitor} from "@elastic/synthetics";
+journey({name: "example journey", tags: ["env:qa"] }, ({ page }) => {
+ monitor.use({
+ tags: ["env:qa"]
+ })
+ // Add steps here
+});
+```
+
+For lightweight monitors, use the `tags` field in the YAML configuration file.
+
+```yaml
+name: example monitor
+tags:
+ - env:qa
+```
+
+To apply tags to all browser and lightweight monitors, set the `monitor.tags` field in the `synthetics.config.ts` file.
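+
+For example, a minimal sketch of a `synthetics.config.ts` fragment that applies a tag to every monitor in the Synthetics project:
+
+```ts
+import type { SyntheticsConfig } from '@elastic/synthetics';
+
+const config: SyntheticsConfig = {
+  monitor: {
+    // Applied to all browser and lightweight monitors at push time
+    tags: ['env:qa'],
+  },
+};
+
+export default config;
+```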
+
+## Filter monitors
+
+When running the `npx @elastic/synthetics push` command, you can filter the monitors that are pushed to Elastic using the following flags:
+
+
+ `--tags Array`
+
+ Push only monitors whose tags match the glob pattern.
+
+
+ `--match `
+
+ Push only monitors whose name or tags match the glob pattern.
+
+
+ `--pattern `
+
+ RegExp pattern to match the journey files in the current working directory.
+ Defaults to `/*.journey.(ts|js)$/` for browser monitors and `/.(yml|yaml)$/` for
+ lightweight monitors.
+
+
+
+You can combine these filters and push monitors to different projects based on their tags by using multiple configuration files.
+
+```sh
+npx @elastic/synthetics push --config synthetics.qa.config.ts --tags env:qa
+npx @elastic/synthetics push --config synthetics.prod.config.ts --tags env:prod
+```
+
+
+
+## `@elastic/synthetics locations`
+
+List all available locations for running synthetics monitors.
+
+```sh
+npx @elastic/synthetics locations --url --auth
+```
+
+Run `npx @elastic/synthetics locations` with no flags to list all the available global locations managed by Elastic for running synthetics monitors.
+
+To list both locations on Elastic's global managed infrastructure and ((private-location))s, include:
+
+
+ `--url `
+
+ The URL for the Observability project from which to fetch all available public and ((private-location))s.
+
+
+ `--auth `
+
+ API key used for authentication.
+
+
+
+{/*
+ If an administrator has disabled Elastic managed locations for the role you are assigned
+ and you do _not_ include `--url` and `--auth`, all global locations managed by Elastic will be listed.
+ However, you will not be able to push to these locations with your API key and will see an error:
+ _You don't have permission to use Elastic managed global locations_. For more details, refer to the
+ troubleshooting docs.
+ */}
diff --git a/docs/en/serverless/synthetics/synthetics-configuration.mdx b/docs/en/serverless/synthetics/synthetics-configuration.mdx
new file mode 100644
index 0000000000..bfeec9e22b
--- /dev/null
+++ b/docs/en/serverless/synthetics/synthetics-configuration.mdx
@@ -0,0 +1,178 @@
+---
+id: serverlessObservabilitySyntheticsConfiguration
+slug: /serverless/observability/synthetics-configuration
+title: Configure a Synthetics project
+# description: Description to be written
+tags: []
+---
+
+
+
+import Snippet1 from '../transclusion/synthetics/configuration/monitor-config-options.mdx'
+
+
+
+Synthetic tests support the configuration of dynamic parameters that can be
+used in Synthetics projects. In addition, the Synthetics agent, which is built on top
+of Playwright, supports configuring browser and context options that are available
+in Playwright-specific methods, for example, `ignoreHTTPSErrors`, `extraHTTPHeaders`, and `viewport`.
+
+
+
+Create a `synthetics.config.js` or `synthetics.config.ts` file in the root of the
+Synthetics project and specify the options. For example:
+
+```ts
+import type { SyntheticsConfig } from '@elastic/synthetics';
+
+export default env => {
+ const config: SyntheticsConfig = {
+ params: {
+ url: 'https://www.elastic.co',
+ },
+ playwrightOptions: {
+ ignoreHTTPSErrors: false,
+ },
+ /**
+ * Configure global monitor settings
+ */
+ monitor: {
+ schedule: 10,
+ locations: [ 'us_east' ],
+ },
+ /**
+ * Synthetic project monitors settings
+ */
+ project: {
+ id: 'my-synthetics-project',
+ url: 'https://abc123',
+ },
+ };
+ if (env !== 'development') {
+ /**
+ * Override configuration specific to environment
+ * For example, config.params.url = ""
+ */
+ }
+ return config;
+};
+```
+
+
+ `env` in the example above is the environment you are pushing from,
+ _not_ the environment where monitors will run. In other words, `env`
+ corresponds to the configured `NODE_ENV`.
+
+
+The configuration file can export either an object or a function that returns
+the generated configuration when called. To learn more about configuring
+tests based on the environment, refer to the dynamic configuration documentation.
+
+
+
+## `params`
+
+A JSON object that defines any variables your tests require.
+Read more in Work with params and secrets.
+
+
+
+## `playwrightOptions`
+
+For all available options, refer to the [Playwright documentation](https://playwright.dev/docs/test-configuration).
+
+
+ Do not attempt to run in headful mode (using `headless:false`) when running through Elastic's global managed testing infrastructure or Private Locations as this is not supported.
+
+
+Below are details on a few Playwright options that are particularly relevant to Elastic Synthetics, including timeouts, timezones, and device emulation.
+
+
+
+### Timeouts
+
+Playwright has two types of timeouts that are used in Elastic Synthetics:
+[action and navigation timeouts](https://playwright.dev/docs/test-timeouts#action-and-navigation-timeouts).
+
+Elastic Synthetics uses a default action and navigation timeout of 50 seconds.
+You can override this default using [`actionTimeout`](https://playwright.dev/docs/api/class-testoptions#test-options-action-timeout) and [`navigationTimeout`](https://playwright.dev/docs/api/class-testoptions#test-options-navigation-timeout)
+in `playwrightOptions`.
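+
+For example, a minimal sketch of a `playwrightOptions` fragment that overrides both timeouts (the values are illustrative):
+
+```js
+playwrightOptions: {
+  actionTimeout: 30 * 1000,      // 30 seconds for actions such as click() and fill()
+  navigationTimeout: 60 * 1000,  // 60 seconds for page.goto() and other navigations
+}
+```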
+
+
+
+### Timezones and locales
+
+The Elastic global managed testing infrastructure does not currently set the timezone.
+For ((private-location))s, the monitors will use the timezone of the host machine running
+the ((agent)). This is not always desirable if you want to test how a web application
+behaves across different timezones. To specify what timezone to use when the monitor runs,
+you can use `playwrightOptions` on a per-monitor or global basis.
+
+To use a timezone and/or locale for all monitors in the Synthetics project, set
+[`locale` and/or `timezoneId`](https://playwright.dev/docs/emulation#locale%2D%2Dtimezone)
+in the configuration file:
+
+```js
+playwrightOptions: {
+ locale: 'en-AU',
+ timezoneId: 'Australia/Brisbane',
+}
+```
+
+To use a timezone and/or locale for a _specific_ monitor, add these options to a
+journey using `monitor.use`.
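+
+A minimal sketch of such a per-journey override, assuming `monitor.use` accepts a `playwrightOptions` block for these settings:
+
+```js
+import { journey, step, monitor } from '@elastic/synthetics';
+
+journey('Check the site from Brisbane', ({ page, params }) => {
+  // Only this journey's monitor uses the Australian locale and timezone
+  monitor.use({
+    playwrightOptions: {
+      locale: 'en-AU',
+      timezoneId: 'Australia/Brisbane',
+    },
+  });
+  step('load homepage', async () => {
+    await page.goto(params.url);
+  });
+});
+```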
+
+
+
+### Device emulation
+
+You can emulate a mobile device using the configuration file.
+The example configuration below runs tests in "Pixel 5" emulation mode.
+
+```ts
+import { SyntheticsConfig } from "@elastic/synthetics"
+import { devices } from "playwright-chromium"
+
+const config: SyntheticsConfig = {
+ playwrightOptions: {
+ ...devices['Pixel 5']
+ }
+}
+
+export default config;
+```
+
+
+
+## `project`
+
+Information about the Synthetics project.
+
+
+ `id` (`string`)
+
+ A unique id associated with your Synthetics project.
+ It will be used for logically grouping monitors.
+
+ If you used `init` to create a Synthetics project, this is the `` you specified.
+
+
+ `url` (`string`)
+
+ The URL for the Observability project to which you want to upload the monitors.
+
+
+
+
+
+## `monitor`
+
+Default values to be applied to _all_ monitors when using the `@elastic/synthetics` `push` command.
+
+
+
+For information on configuring monitors individually, refer to:
+
+* Configure individual browser monitors for browser monitors
+* Configure lightweight monitors for lightweight monitors
+
diff --git a/docs/en/serverless/synthetics/synthetics-create-test.mdx b/docs/en/serverless/synthetics/synthetics-create-test.mdx
new file mode 100644
index 0000000000..005e81d86a
--- /dev/null
+++ b/docs/en/serverless/synthetics/synthetics-create-test.mdx
@@ -0,0 +1,498 @@
+---
+id: serverlessObservabilitySyntheticsCreateTest
+slug: /serverless/observability/synthetics-create-test
+title: Write a synthetic test
+# description: Description to be written
+tags: []
+---
+
+
+
+
+
+After setting up a Synthetics project, you can start writing synthetic tests that check critical actions and requests that an end-user might make
+on your site.
+
+
+
+## Syntax overview
+
+To write synthetic tests for your application, you'll need to know basic JavaScript and
+[Playwright](https://playwright.dev/) syntax.
+
+
+[Playwright](https://playwright.dev/) is a browser testing library developed by Microsoft.
+It's fast, reliable, and features a modern API that automatically waits for page elements to be ready.
+
+
+The synthetics agent exposes an API for creating and running tests, including:
+
+
+
+ `journey`
+
+ Tests one discrete unit of functionality. Takes two parameters: a `name` (string) and a `callback` (function).
+
+ Learn more in Create a journey.
+
+
+
+ `step`
+
+ Actions within a journey that should be completed in a specific order. Takes two parameters: a `name` (string) and a `callback` (function).
+
+ Learn more in Add steps.
+
+
+
+ `expect`
+
+ Check that a value meets a specific condition. There are several supported checks.
+
+ Learn more in Make assertions.
+
+
+
+ `beforeAll`
+
+ Runs a provided function once, before any `journey` runs. If the provided function returns a promise, the runner will wait for the promise to resolve before invoking the `journey`. Takes one parameter: a `callback` (function).
+
+ Learn more in Set up and remove a global state.
+
+
+
+ `before`
+
+ Runs a provided function before a single `journey` runs. Takes one parameter: a `callback` (function).
+
+ Learn more in Set up and remove a global state.
+
+
+
+ `afterAll`
+
+ Runs a provided function once, after all the `journey` runs have completed. Takes one parameter: a `callback` (function).
+
+ Learn more in Set up and remove a global state.
+
+
+
+ `after`
+
+ Runs a provided function after a single `journey` has completed. Takes one parameter: a `callback` (function).
+
+ Learn more in Set up and remove a global state.
+
+
+
+ `monitor`
+
+ The `monitor.use` method allows you to determine a monitor's configuration on a journey-by-journey basis. If you want two journeys to create monitors with different intervals, for example, you should call `monitor.use` in each of them and set the `schedule` property to different values in each. Note that this is only relevant when using the `push` command to create monitors in your Observability project.
+
+ Learn more in Configure individual browser monitors.
+
+
+
+
+
+
+## Create a journey
+
+Create a new file using the `.journey.ts` or `.journey.js` file extension or edit one of the example journey files.
+
+A _journey_ tests one discrete unit of functionality.
+For example, logging into a website, adding something to a cart, or joining a mailing list.
+
+The journey function takes two parameters: a `name` and a `callback`.
+The `name` helps you identify an individual journey.
+The `callback` argument is a function that encapsulates what the journey does.
+The callback provides access to fresh Playwright `page`, `params`, `browser`, and `context` instances.
+
+```js
+journey('Journey name', ({ page, browser, context, params, request }) => {
+ // Add steps here
+});
+```
+
+
+
+### Arguments
+
+
+
+
+ **`name`** (_string_)
+
+
+
+ A user-defined string to describe the journey.
+
+
+
+
+
+
+ **`callback`** (_function_)
+
+
+
+ A function where you will add steps.
+
+ **Instances**:
+
+ `page`
+ : A [page](https://playwright.dev/docs/api/class-page) object from Playwright
+ that lets you control the browser's current page.
+
+ `browser`
+ : A [browser](https://playwright.dev/docs/api/class-browser) object created by Playwright.
+
+ `context`
+ : A [browser context](https://playwright.dev/docs/api/class-browsercontext)
+ that doesn't share cookies or cache with other browser contexts.
+
+ `params`
+ : User-defined variables that allow you to invoke the Synthetics suite with custom parameters.
+ For example, if you want to use a different homepage depending on the `env`
+ (`localhost` for `dev` and a URL for `prod`). See Work with params and secrets
+ for more information.
+
+ `request`
+ : A request object that can be used to make API requests independently of the browser
+ interactions. For example, to get authentication credentials or tokens in service of a
+ browser-based test. See Make API requests for more information.
+
+
+
+
+
+
+
+
+## Add steps
+
+A journey consists of one or more _steps_. Steps are actions that should be completed in a specific order.
+Steps are displayed individually in the Synthetics UI along with screenshots for convenient debugging and error tracking.
+
+A basic two-step journey would look like this:
+
+```js
+journey('Journey name', ({ page, browser, context, params, request }) => {
+ step('Step 1 name', () => {
+ // Do something here
+ });
+ step('Step 2 name', () => {
+ // Do something else here
+ });
+});
+```
+
+Steps can be as simple or complex as you need them to be.
+For example, a basic first step might load a web page:
+
+```js
+step('Load the demo page', async () => {
+ await page.goto('https://elastic.github.io/synthetics-demo/'); [^1]
+});
+```
+[^1]: Go to the [`page.goto` reference](https://playwright.dev/docs/api/class-page#page-goto) for more information.
+
+
+
+### Arguments
+
+
+
+ **`name`** (_string_)
+
+ A user-defined string to describe the step.
+
+
+
+ **`callback`** (_function_)
+
+ A function where you simulate user workflows using Synthetics and Playwright syntax.
+
+
+
+
+
+
+If you want to generate code by interacting with a web page directly, you can use the **Synthetics Recorder**.
+
+The recorder launches a [Chromium browser](https://www.chromium.org/Home/) that will listen to each interaction you have with the web page and record them internally using Playwright.
+When you're done interacting with the browser, the recorder converts the recorded actions into JavaScript code that you can use with Elastic Synthetics or ((heartbeat)).
+
+For more details on getting started with the Synthetics Recorder, refer to Use the Synthetics Recorder.
+
+
+
+
+
+### Playwright syntax
+
+Inside the callback for each step, you'll likely use a lot of Playwright syntax.
+Use Playwright to simulate and validate user workflows including:
+
+* Interacting with the [browser](https://playwright.dev/docs/api/class-browser)
+ or the current [page](https://playwright.dev/docs/api/class-page) (like in the example above).
+
+* Finding elements on a web page using [locators](https://playwright.dev/docs/api/class-locator).
+* Simulating [mouse](https://playwright.dev/docs/api/class-mouse),
+ [touch](https://playwright.dev/docs/api/class-touchscreen), or
+ [keyboard](https://playwright.dev/docs/api/class-keyboard) events.
+
+* Making assertions using [`@playwright/test`'s `expect` function](https://playwright.dev/docs/test-assertions). Read more in Make assertions.
+
+Visit the [Playwright documentation](https://playwright.dev/docs) for more information.
+
+
+
+Do not attempt to run in headful mode (using `headless:false`) when running through Elastic's global managed testing infrastructure or Private Locations as this is not supported.
+
+
+
+However, not all Playwright functionality should be used with Elastic Synthetics.
+In some cases, there are alternatives to Playwright functionality built into the
+Elastic Synthetics library. These alternatives are designed to work better for
+synthetic monitoring. Do _not_ use Playwright syntax to:
+
+* **Make API requests.** Use Elastic Synthetic's `request`
+ parameter instead. Read more in Make API requests.
+
+There is also some Playwright functionality that is not supported out-of-the-box
+in Elastic Synthetics, including:
+
+* [Videos](https://playwright.dev/docs/api/class-video)
+* The [`toHaveScreenshot`](https://playwright.dev/docs/api/class-locatorassertions#locator-assertions-to-have-screenshot-1) and [`toMatchSnapshot`](https://playwright.dev/docs/api/class-snapshotassertions) assertions
+
+
+ Screenshots and videos captured programmatically via [`screenshot`](https://playwright.dev/docs/api/class-page#page-screenshot) or [`video`](https://playwright.dev/docs/api/class-page#page-video) are not stored and are not shown in the Synthetics application. Providing a `path` will likely make the monitor fail due to missing permissions to write local files.
+
+
+
+
+## Make assertions
+
+A more complex `step` might wait for a page element to be selected
+and then make sure that it matches an expected value.
+
+Elastic Synthetics uses `@playwright/test`'s `expect` function to make assertions
+and supports most [Playwright assertions](https://playwright.dev/docs/test-assertions).
+Elastic Synthetics does _not_ support [`toHaveScreenshot`](https://playwright.dev/docs/api/class-locatorassertions#locator-assertions-to-have-screenshot-1)
+or any [Snapshot Assertions](https://playwright.dev/docs/api/class-snapshotassertions).
+
+For example, on a page using the following HTML:
+
+```html
+<header>
+  <h1>todos</h1>
+  <input class="new-todo" placeholder="What needs to be done?" />
+</header>
+```
+
+You can verify that the `input` element with class `new-todo` has the expected `placeholder` value
+(the hint text for `input` elements) with the following test:
+
+```js
+step('Assert placeholder text', async () => {
+ const input = await page.locator('input.new-todo'); [^1]
+ expect(await input.getAttribute('placeholder')).toBe(
+ 'What needs to be done?'
+ ); [^2]
+});
+```
+[^1]: Find the `input` element with class `new-todo`.
+[^2]: Use the assertion library provided by the Synthetics agent to check that
+the value of the `placeholder` attribute matches a specific string.
+
+
+
+## Make API requests
+
+You can use the `request` parameter to make API requests independently of browser interactions.
+For example, you could retrieve a token from an HTTP endpoint and use it in a subsequent webpage request.
+
+```js
+step('make an API request', async () => {
+ const response = await request.get(params.url);
+ // Do something with the response
+})
+```
+
+The Elastic Synthetics `request` parameter is similar to [other request objects that are exposed by Playwright](https://playwright.dev/docs/api/class-apirequestcontext)
+with a few key differences:
+
+* The Elastic Synthetics `request` parameter comes built into the library so it doesn't
+ have to be imported separately, which reduces the amount of code needed and allows you to
+ make API requests in inline journeys.
+
+* The top level `request` object exposed by Elastic Synthetics has its own isolated cookie storage
+ unlike Playwright's `context.request` and `page.request`, which share cookie storage
+ with the corresponding [`BrowserContext`](https://playwright.dev/docs/api/class-browsercontext).
+
+* If you want to control the creation of the `request` object, you can do so by passing options
+ via `--playwright-options` or in the
+ `synthetics.config.ts` file.
+
+For a full example that shows how to use the `request` object, refer to the [Elastic Synthetics demo repository](https://github.com/elastic/synthetics-demo/blob/main/advanced-examples/journeys/api-requests.journey.ts).
+
+
+The `request` parameter is not intended to be used for writing pure API tests. Instead, it is a way to support
+writing plain HTTP requests in service of a browser-based test.
+
+
+
+
+## Set up and remove a global state
+
+If there are any actions that should be done before or after journeys, you can use `before`, `beforeAll`, `after`, or `afterAll`.
+
+To set up global state or a server that will be used for a **single** `journey`, for example,
+use a `before` hook. To perform this setup once before **all** journeys, use a `beforeAll` hook.
+
+```js
+before(({ params }) => {
+ // Actions to take
+});
+
+beforeAll(({ params }) => {
+ // Actions to take
+});
+```
+
+You can clean up global state or close a server used for a **single** `journey` using an `after` hook.
+To perform this cleanup once after all journeys, use an `afterAll` hook.
+
+```js
+after(({ params }) => {
+ // Actions to take
+});
+
+afterAll(({ params }) => {
+ // Actions to take
+});
+```
+
+
+
+## Import NPM packages
+
+You can import and use other NPM packages inside journey code.
+Refer to the example below using the external NPM package `is-positive`:
+
+```js
+import { journey, step, monitor, expect } from '@elastic/synthetics';
+import isPositive from 'is-positive';
+
+journey('bundle test', ({ page, params }) => {
+ step('check if positive', () => {
+ expect(isPositive(4)).toBe(true);
+ });
+});
+```
+
+When you create a monitor from a journey that uses
+external NPM packages, those packages will be bundled along with the
+journey code when the `push` command is invoked.
+
+However, there are some limitations when using external packages:
+
+* Bundled journeys after compression should not be more than 800 kilobytes.
+* Native Node.js modules will not work as expected due to platform inconsistencies.
+
+
+
+## Sample synthetic test
+
+A complete example of a basic synthetic test might look like this:
+
+```js
+import { journey, step, expect } from '@elastic/synthetics';
+
+journey('Ensure placeholder is correct', ({ page }) => {
+ step('Load the demo page', async () => {
+ await page.goto('https://elastic.github.io/synthetics-demo/');
+ });
+ step('Assert placeholder text', async () => {
+ const placeholderValue = await page.getAttribute(
+ 'input.new-todo',
+ 'placeholder'
+ );
+ expect(placeholderValue).toBe('What needs to be done?');
+ });
+});
+```
+
+You can find more complex examples in the [Elastic Synthetics demo repository](https://github.com/elastic/synthetics-demo/blob/main/advanced-examples/journeys/api-requests.journey.ts).
+
+
+
+## Test locally
+
+As you write journeys, you can run them locally to verify they work as expected. Then, you can create monitors to run your journeys at a regular interval.
+
+To test all the journeys in a Synthetics project, navigate to the directory containing the Synthetics project and run the journeys there.
+By default, the `@elastic/synthetics` runner will only run files matching the filename `*.journey.(ts|js)*`.
+
+```sh
+# Run tests on the current directory. The dot `.` indicates
+# that it should run all tests in the current directory.
+npx @elastic/synthetics .
+```
+
+
+
+### Test an inline monitor
+
+To test an inline monitor's journey locally, pipe the inline journey into the `npx @elastic/synthetics` command.
+
+Assume, for example, that your inline monitor includes the following code:
+
+```js
+step('load homepage', async () => {
+ await page.goto('https://www.elastic.co');
+});
+step('hover over products menu', async () => {
+ await page.hover('css=[data-nav-item=products]');
+});
+```
+
+To run that journey locally, you can save that code to a file and pipe the file's contents into `npx @elastic/synthetics`:
+
+```sh
+cat path/to/sample.js | npx @elastic/synthetics --inline
+```
+
+And you'll get a response like the following:
+
+```sh
+Journey: inline
+ ✓ Step: 'load homepage' succeeded (1831 ms)
+ ✓ Step: 'hover over products menu' succeeded (97 ms)
+
+ 2 passed (2511 ms)
+```
diff --git a/docs/en/serverless/synthetics/synthetics-feature-roles.mdx b/docs/en/serverless/synthetics/synthetics-feature-roles.mdx
new file mode 100644
index 0000000000..c4bab058b3
--- /dev/null
+++ b/docs/en/serverless/synthetics/synthetics-feature-roles.mdx
@@ -0,0 +1,46 @@
+---
+id: serverlessObservabilitySyntheticsFeatureRoles
+slug: /serverless/observability/synthetics-feature-roles
+title: Grant users access to secured resources
+# description: Description to be written
+tags: []
+---
+
+
+
+
+
+You can use role-based access control to grant users access to secured
+resources. The roles that you set up depend on your organization's security
+requirements and the minimum privileges required to use specific features.
+
+
+
+ Viewer
+
+ * View and create visualizations that access Synthetics data.
+
+
+
+ Editor
+
+ * Create, modify, and delete monitors.
+ * View and create visualizations that access Synthetics data.
+
+
+
+ Admin
+
+ * Full access to project management, properties, and security privileges.
+ * Create, modify, and delete monitors.
+ * View and create visualizations that access Synthetics data.
+
+
+
+
+Read more about user roles in .
diff --git a/docs/en/serverless/synthetics/synthetics-get-started-project.mdx b/docs/en/serverless/synthetics/synthetics-get-started-project.mdx
new file mode 100644
index 0000000000..a48fbd6f4c
--- /dev/null
+++ b/docs/en/serverless/synthetics/synthetics-get-started-project.mdx
@@ -0,0 +1,209 @@
+---
+id: serverlessObservabilitySyntheticsGetStartedProject
+slug: /serverless/observability/synthetics-get-started-project
+title: Create monitors with a Synthetics project
+# description: Description to be written
+tags: []
+---
+
+
+
+A Synthetics project is the most powerful and sophisticated way to configure synthetic monitors.
+A Synthetics project lets you define your infrastructure as code, an approach commonly known as IaC or GitOps.
+With monitors created and managed in Synthetics projects, you organize your YAML configuration and
+JavaScript- or TypeScript-defined monitors on the filesystem, use Git for version control,
+and deploy via a CLI tool, usually executed on a CI/CD platform.
+
+
+
+This is one of two approaches you can use to set up a synthetic monitor.
+
+## Prerequisites
+
+You must be signed in as a user with Editor access.
+{/* and Monitor Management must be enabled by an administrator as described in Setup role. */}
+
+Working with a Synthetics project requires working with the Elastic Synthetics CLI tool, which
+can be invoked via the `npx @elastic/synthetics` command. Before getting started
+you'll need to:
+
+1. Install [Node.js](https://nodejs.dev/en/)
+1. Install the package:
+
+ ```sh
+ npm install -g @elastic/synthetics
+ ```
+
+1. Confirm your system is set up correctly:
+
+ ```sh
+ npx @elastic/synthetics -h
+ ```
+
+You should also decide where you want to run the monitors before getting started.
+You can run monitors in Synthetics projects on one or both of the following:
+
+* **Elastic's global managed testing infrastructure**:
+ With Elastic's global managed testing infrastructure, you can create and run monitors in multiple
+ locations without having to manage your own infrastructure.
+ Elastic takes care of software updates and capacity planning for you.
+
+* **((private-location))s**: ((private-location))s allow you to run monitors from your own premises.
+ To use ((private-location))s you must create a ((private-location)) before continuing.
+ For step-by-step instructions, refer to Monitor resources on private networks.
+
+## Create a Synthetics project
+
+Start by creating your first Synthetics project. Run the command below to create a new
+Synthetics project named `synthetic-project-test` in the current directory.
+
+```sh
+npx @elastic/synthetics init synthetic-project-test
+```
+
+Then, follow the prompts on screen to set up the correct default variables for your Synthetics project.
+When complete, set the `SYNTHETICS_API_KEY` environment variable in your terminal, which is used
+to connect to your Observability project:
+
+1. To generate an API key:
+ 1. Go to **Synthetics** in your Observability project.
+ 1. Click **Settings**.
+ 1. Switch to the **Project API Keys** tab.
+ 1. Click **Generate Project API key**.
+
+
+
+ To generate a Project API key, you must be logged in as a user with Editor access.
+
+
+
+ ![Project API Keys tab in Synthetics settings](../images/synthetics-monitor-management-api-key.png)
+
+
+
+ To use an API key to push to Elastic's global managed testing infrastructure,
+ the _Elastic managed locations enabled_ toggle must be on when generating the API key.
+ If the _Elastic managed locations enabled_ toggle is disabled, an administrator has restricted
+ access to Elastic's global managed testing infrastructure.
+ {/* Read more in the writer role documentation. */}
+
+
+
+1. Set the `SYNTHETICS_API_KEY` environment variable in your terminal.
+ You will most likely want to set this permanently.
+ This is done differently in [PowerShell](https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_environment_variables?view=powershell-7.2#saving-changes-to-environment-variables) and [Bash](https://unix.stackexchange.com/a/117470).
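+
+   For example, one way to persist the key for future Bash sessions (the value is a placeholder):
+
+   ```sh
+   echo 'export SYNTHETICS_API_KEY="<your-project-api-key>"' >> ~/.bashrc
+   source ~/.bashrc
+   ```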
+
+Then, take a look at key files and directories inside your Synthetics project:
+
+* `journeys` is where you'll add `.ts` and `.js` files defining your browser monitors.
+ When you create a new Synthetics project, this directory will contain files defining sample monitors.
+* `lightweight` is where you'll add `.yaml` files defining your lightweight monitors.
+ When you create a new Synthetics project, this directory will contain a file defining sample monitors.
+* `synthetics.config.ts` contains settings for your Synthetics project.
+ When you create a new Synthetics project, it will contain some basic configuration options that you can customize later.
+
+
+
+ The `synthetics.config.ts` in the sample Synthetics project uses a location on Elastic's global managed testing infrastructure.
+ Administrators can restrict access to Elastic's global managed testing infrastructure.
+ When you attempt to `push` the sample monitors,
+ if you see an error stating that you don't have permission to use Elastic managed global locations,
+ refer to the troubleshooting guide for guidance.
+
+
+
+* `package.json` contains NPM settings for your Synthetics project. Learn more in the [NPM documentation](https://docs.npmjs.com/about-packages-and-modules).
+* `.github` contains sample workflow files to use with GitHub Actions.
+
+## Examine sample monitors
+
+Inside the `lightweight` directory you'll find sample lightweight monitors.
+Here's an example of a YAML file defining a lightweight monitor:
+
+```yml
+# lightweight.yml
+heartbeat.monitors:
+- type: http
+ name: Todos Lightweight
+ id: todos-lightweight
+ urls: "https://elastic.github.io/synthetics-demo/"
+ schedule: '@every 1m'
+```
+
+For more details on lightweight monitor configuration options,
+refer to Configure lightweight monitors.
+
+Inside the `journeys` directory you'll find sample browser monitors.
+Here's an example of a TypeScript file defining a browser monitor:
+
+```ts
+// example.journey.ts
+import { journey, step, monitor, expect } from '@elastic/synthetics';
+journey('My Example Journey', ({ page, params }) => {
+ // Only relevant for the push command to create
+ // monitors in your Observability project
+ monitor.use({
+ id: 'example-monitor',
+ schedule: 10,
+ });
+ step('launch application', async () => {
+ await page.goto(params.url);
+ });
+ step('assert title', async () => {
+ const header = await page.locator('h1');
+ expect(await header.textContent()).toBe('todos');
+ });
+});
+```
+
+For more details on writing journeys and configuring browser monitors,
+refer to Scripting browser monitors.
+
+## Test and connect to your Observability project
+
+While inside the Synthetics project directory you can do two things with the `npx @elastic/synthetics` command:
+
+* Test browser-based monitors locally. To run all journeys defined in `.ts` and `.js` files:
+
+ ```sh
+ npx @elastic/synthetics journeys
+ ```
+
+* Push all monitor configurations to an Elastic Observability project.
+ Run the following command from inside your Synthetics project directory:
+
+ ```sh
+ npx @elastic/synthetics push --auth $SYNTHETICS_API_KEY --url
+ ```
+
+One monitor will appear in the Synthetics UI for each journey or
+lightweight monitor, and you'll manage all monitors from your local environment.
+For more details on using the `push` command, refer to `@elastic/synthetics push`.
+
+
+
+If you've added a ((private-location)),
+you can `push` to that ((private-location)).
+
+To list available ((private-location))s,
+run the `npx @elastic/synthetics locations` command
+with the URL for the Observability project from which to fetch available locations.
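+
+For example (the URL is a placeholder for your Observability project URL):
+
+```sh
+npx @elastic/synthetics locations --url https://my-observability-project.example.com --auth $SYNTHETICS_API_KEY
+```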
+
+
+
+## View in your Observability project
+
+Then, go to **Synthetics** in your Observability project. You should see your newly pushed monitors running.
+You can also go to the **Management** tab to see the monitors' configuration settings.
+
+
+ When a monitor is created or updated, the first run might not occur immediately, but the time it takes for the first run to occur will be less than the monitor's configured frequency. For example, if you create a monitor and configure it to run every 10 minutes, the first run will occur within 10 minutes of being created. After the first run, the monitor will begin running regularly based on the configured frequency. You can run a manual test if you want to see the results more quickly.
+
+
+## Next steps
+
+Learn more about:
+
+* Configuring lightweight monitors
+* Configuring browser monitors
+* Implementing best practices for working with Synthetics projects
diff --git a/docs/en/serverless/synthetics/synthetics-get-started-ui.mdx b/docs/en/serverless/synthetics/synthetics-get-started-ui.mdx
new file mode 100644
index 0000000000..e5beda9372
--- /dev/null
+++ b/docs/en/serverless/synthetics/synthetics-get-started-ui.mdx
@@ -0,0 +1,134 @@
+---
+id: serverlessObservabilitySyntheticsGetStartedUi
+slug: /serverless/observability/synthetics-get-started-ui
+title: Create monitors in the Synthetics UI
+# description: Description to be written
+tags: []
+---
+
+
+
+import GlobalManagedPaidFor from '../transclusion/synthetics/global-managed-paid-for.mdx'
+
+You can create synthetic monitors directly in the UI by opening an Observability project and navigating to **Synthetics**.
+
+
+
+This is one of two approaches you can use to set up a synthetic monitor.
+
+## Prerequisites
+
+You must be signed in as a user with Editor access.
+{/* and Monitor Management must be enabled by an administrator as described in Setup role. */}
+
+
+
+You should decide where you want to run the monitors before getting started.
+You can run monitors on one or both of the following:
+
+* **Elastic's global managed testing infrastructure**:
+ With Elastic's global managed testing infrastructure, you can create and run monitors in multiple
+ locations without having to manage your own infrastructure.
+ Elastic takes care of software updates and capacity planning for you.
+
+* **((private-location))s**: ((private-location))s allow you to run monitors from your own premises.
+ To use ((private-location))s you must create a ((private-location)) before continuing.
+ For step-by-step instructions, refer to Monitor resources on private networks.
+
+
+
+## Add a lightweight monitor
+
+To use the UI to add a lightweight monitor:
+
+1. Go to **Synthetics** in your Observability project.
+1. Click **Create monitor**.
+1. Set the monitor type to **HTTP Ping**, **TCP Ping**, or **ICMP Ping**.
+1. In _Locations_, select one or more locations.
+
+
+ If you don't see any locations listed, refer to the
+ troubleshooting guide for guidance.
+
+
+
+ If you've added a ((private-location)),
+ you'll see the ((private-location)) in the list of _Locations_.
+
+ ![Screenshot of Monitor locations options including a ((private-location))](../images/private-locations-monitor-locations.png)
+
+
+1. Set the _Frequency_, and configure the monitor as needed.
+1. Click **Advanced options** to see more ways to configure your monitor.
+1. (Optional) Click **Run test** to verify that the test is valid.
+1. Click **Create monitor**.
+
+ ![Synthetics Create monitor UI](../images/synthetics-get-started-ui-lightweight.png)
+
+## Add a browser monitor
+
+You can also create a browser monitor in the UI using an **Inline script**.
+
+An inline script contains a single journey that you manage individually.
+Inline scripts can be quick to set up, but can also be more difficult to manage.
+Each browser monitor configured using an inline script can contain only _one_ journey,
+which must be maintained directly in the UI.
+
+If you depend on external packages, have your journeys next to your code repository,
+or want to embed and manage more than one journey from a single monitor configuration,
+use a Synthetics project instead.
+
+To use the UI to add a browser monitor:
+
+1. Click **Create monitor**.
+1. Set the monitor type to **Multistep**.
+1. In _Locations_, select one or more locations.
+
+
+ If you don't see any locations listed, refer to the
+ troubleshooting guide for guidance.
+
+
+1. Set the _Frequency_.
+1. Add steps to the **Script editor** code block directly.
+ The `journey` keyword isn't required, and variables like `page` and `params` will be part of your script's scope.
+ You cannot `import` any dependencies when using inline browser monitors.
+
+ ![Configure a synthetic monitor using an inline script](../images/synthetics-ui-inline-script.png)
+
+
+ Alternatively, you can use the **Script recorder** option.
+ You can use the Elastic Synthetics Recorder to interact with a web page, export
+ journey code that reflects all the actions you took, and upload the results in the UI.
+ For more information, refer to Use the Synthetics Recorder.
+
+
+1. Click **Advanced options** to see more ways to configure your monitor.
+
+ * Use **Data options** to add context to the data coming from your monitors.
+ * Use the **Synthetics agent options** to provide fine-tuned configuration for the synthetics agent.
+ Read more about available options in Use the Synthetics CLI.
+
+1. (Optional) Click **Run test** to verify that the test is valid.
+1. Click **Create monitor**.
+
+## View in your Observability project
+
+Navigate to **Synthetics** in your Observability project, where you can see screenshots of each run,
+set up alerts in case of test failures, and more.
+
+If a test does fail (shown as `down` in the Synthetics UI), you'll be able to view the step script that failed,
+any errors, and a stack trace.
+For more information, refer to Analyze data from synthetic monitors.
+
+
+ When a monitor is created or updated, the first run might not occur immediately, but the time it takes for the first run to occur will be less than the monitor's configured frequency. For example, if you create a monitor and configure it to run every 10 minutes, the first run will occur within 10 minutes of being created. After the first run, the monitor will begin running regularly based on the configured frequency. You can run a manual test if you want to see the results more quickly.
+
+
+## Next steps
+
+Learn more about:
+
+* Writing user journeys to use as inline scripts
+* Using the Synthetics Recorder
+* Configuring lightweight monitors
diff --git a/docs/en/serverless/synthetics/synthetics-get-started.mdx b/docs/en/serverless/synthetics/synthetics-get-started.mdx
new file mode 100644
index 0000000000..4e331a5712
--- /dev/null
+++ b/docs/en/serverless/synthetics/synthetics-get-started.mdx
@@ -0,0 +1,42 @@
+---
+id: serverlessObservabilitySyntheticsGetStarted
+slug: /serverless/observability/synthetics-get-started
+title: Get started
+# description: Description to be written
+tags: []
+---
+
+
+
+To set up a synthetic monitor, you need to configure the monitor, run it, and send data back to Elastic.
+After setup is complete, the data will be available in your Observability project to view, analyze, and alert on.
+
+There are two ways to set up a synthetic monitor:
+
+* Synthetics project
+* The Synthetics UI
+
+Read more about each option below, and choose the approach that works best for you.
+
+## Synthetics project
+
+With a Synthetics project, you write tests in an external version-controlled Node.js project
+using YAML for lightweight monitors and JavaScript or TypeScript for browser monitors.
+Then, you use the `@elastic/synthetics` NPM library's `push` command to create
+monitors in your Observability project.
+
+This approach works well if you want to create both browser monitors and lightweight
+monitors. It also allows you to configure and update monitors using a GitOps workflow.
+
+Get started in Create monitors in a Synthetics project.
+
+
+
+## Synthetics UI
+
+You can create monitors directly in the user interface.
+This approach works well if you want to create and manage your monitors in the browser.
+
+Get started in Create monitors in the Synthetics UI.
+
+
diff --git a/docs/en/serverless/synthetics/synthetics-intro.mdx b/docs/en/serverless/synthetics/synthetics-intro.mdx
new file mode 100644
index 0000000000..08ebc42de3
--- /dev/null
+++ b/docs/en/serverless/synthetics/synthetics-intro.mdx
@@ -0,0 +1,59 @@
+---
+id: serverlessObservabilityMonitorSynthetics
+slug: /serverless/observability/monitor-synthetics
+title: Synthetic monitoring
+# description: Description to be written
+tags: []
+---
+
+
+
+
+
+The Synthetics UI is for viewing result data from monitors created and managed
+directly in the Synthetics UI or managed externally
+using a Synthetics project.
+This can include both lightweight and browser-based monitors, and can include monitors
+running from either Elastic's global managed testing infrastructure or from
+((private-location))s.
+
+
+
+Synthetics periodically checks the status of your services and applications.
+Monitor the availability of network endpoints and services using the following types of monitors:
+
+* Lightweight HTTP/S, TCP, and ICMP monitors
+* Browser monitors
+
+![Synthetics UI](../images/synthetics-monitor-page.png)
+
+## Lightweight HTTP/S, TCP, and ICMP monitors
+
+You can monitor the status of network endpoints using the following lightweight checks:
+
+{/* lint ignore v4 v6 */}
+| | |
+|---|---|
+| **HTTP monitor** | Monitor your website. The HTTP monitor checks to make sure specific endpoints return the correct status code and display the correct text. |
+| **ICMP monitor** | Check the availability of your hosts. The ICMP monitor uses ICMP (v4 and v6) Echo Requests to check the network reachability of the hosts you are pinging. This will tell you whether the host is available and connected to the network, but doesn't tell you if a service on the host is running or not. |
+| **TCP monitor** | Monitor the services running on your hosts. The TCP monitor checks individual ports to make sure the service is accessible and running. |
+
+To set up your first monitor, refer to Get started.
+
+## Browser monitors
+
+Real browser synthetic monitoring enables you to test critical actions and requests that an end-user would make
+on your site at predefined intervals and in a controlled environment.
+Synthetic monitoring extends traditional end-to-end testing techniques because it allows your tests to run continuously on the cloud.
+The result is rich, consistent, and repeatable data that you can trend and alert on.
+
+For example, you can test popular user journeys, like logging in, adding items to a cart, and checking
+out — actions that need to work for your users consistently.
+
+You can run an automated Synthetics project on a real Chromium browser and
+view each synthetic monitoring journey in your Observability project side-by-side with your other monitors.
+
+Alerting helps you detect degraded performance or broken actions before your users do.
+By receiving alerts early, you can fix issues before they impact your bottom line or customer experience.
+
+To set up your first monitor, refer to Get started.
diff --git a/docs/en/serverless/synthetics/synthetics-journeys.mdx b/docs/en/serverless/synthetics/synthetics-journeys.mdx
new file mode 100644
index 0000000000..b5e5d748fd
--- /dev/null
+++ b/docs/en/serverless/synthetics/synthetics-journeys.mdx
@@ -0,0 +1,30 @@
+---
+id: serverlessObservabilitySyntheticsJourneys
+slug: /serverless/observability/synthetics-journeys
+title: Scripting browser monitors
+# description: Description to be written
+tags: []
+---
+
+
+
+
+
+Browser monitors are a type of synthetic monitor.
+Synthetic monitoring extends traditional end-to-end testing techniques because it allows your tests to run continuously on the cloud.
+With synthetic monitoring, you can assert that your application continues to work after a deployment by reusing
+the same journeys that you used to validate the software on your machine.
+
+You can use synthetic monitors to detect bugs caused by invalid states you couldn't predict and didn't write tests for.
+Synthetic monitors can also help you catch bugs in features that don't get much traffic by allowing you to periodically simulate users' actions.
+
+Start by learning the basics of synthetic monitoring, including how to:
+
+* Write a synthetic test
+* Test locally
+* Configure individual browser monitors
+* Work with params and secrets
+* Use the Synthetics Recorder
+
+![Diagram of the lifecycle of a synthetic monitor: write a test, test it locally, create a monitor, manage a monitor, delete a monitor](../images/synthetic-monitor-lifecycle.png)
+
diff --git a/docs/en/serverless/synthetics/synthetics-lightweight.mdx b/docs/en/serverless/synthetics/synthetics-lightweight.mdx
new file mode 100644
index 0000000000..f030dca442
--- /dev/null
+++ b/docs/en/serverless/synthetics/synthetics-lightweight.mdx
@@ -0,0 +1,233 @@
+---
+id: serverlessObservabilitySyntheticsLightweight
+slug: /serverless/observability/synthetics-lightweight
+title: Configure lightweight monitors
+# description: Description to be written
+tags: []
+---
+
+
+
+import LightweightConfigCommon from '../transclusion/synthetics/reference/lightweight-config/common.mdx'
+import LightweightConfigHttp from '../transclusion/synthetics/reference/lightweight-config/http.mdx'
+import LightweightConfigIcmp from '../transclusion/synthetics/reference/lightweight-config/icmp.mdx'
+import LightweightConfigTcp from '../transclusion/synthetics/reference/lightweight-config/tcp.mdx'
+
+
+
+Monitor the status of network endpoints using the following lightweight checks:
+
+* **HTTP**: Monitor your website. The HTTP monitor checks to make sure specific endpoints return the correct
+ status code and display the correct text.
+
+* **ICMP**: Check the availability of your hosts. The ICMP monitor uses ICMP (v4 and v6) Echo
+ Requests to check the network reachability of the hosts you are pinging. This will tell you whether the
+ host is available and connected to the network, but doesn't tell you if a service on the host is running or
+ not.
+
+* **TCP**: Monitor the services running on your hosts. The TCP monitor checks individual ports
+ to make sure the service is accessible and running.
+
+Lightweight monitors can be configured using either the Synthetics UI
+or a Synthetics project.
+
+
+
+## Synthetics UI
+
+To use the UI, go to **Synthetics** in your Observability project to create and configure monitors.
+For step-by-step instructions, refer to Create monitors in the Synthetics UI.
+
+![Synthetics Create monitor UI](../images/synthetics-get-started-ui-lightweight.png)
+
+## Synthetics project
+
+To use YAML files to create lightweight monitors in a Synthetics project, set up the Synthetics project
+and configure monitors in YAML files in the `lightweight` directory.
+
+In each YAML file, define a set of `monitors` to check your remote hosts.
+Each `monitor` item is an entry in a YAML list and begins with a dash (`-`).
+You can define the type of monitor to use, the hosts to check, and other
+optional settings.
+
+The following example configures three monitors checking via the `http`, `icmp`, and `tcp`
+protocols and demonstrates how to use TCP Echo response verification:
+
+```yaml
+heartbeat.monitors:
+- type: http
+ name: Todos Lightweight
+ id: todos-lightweight
+ urls: "https://elastic.github.io/synthetics-demo/"
+ schedule: '@every 1m'
+- type: icmp
+ id: ping-myhost
+ name: My Host Ping
+ hosts: "myhost"
+ schedule: '@every 5m'
+- type: tcp
+ id: myhost-tcp-echo
+ name: My Host TCP Echo
+ hosts: "myhost:777" # default TCP Echo Protocol
+ check.send: "Check"
+ check.receive: "Check"
+ schedule: '@every 60s'
+```
+
+
+
+There are some common monitor configuration options that are the same for all lightweight monitor types.
+For a complete list, refer to Common options.
+
+Each monitor type also has additional configuration options that are specific to that type.
+Refer to:
+
+* HTTP options
+* ICMP options
+* TCP options
+
+The `tcp` and `http` monitor types both support SSL/TLS and some proxy settings.
+
+
+
+### Common options
+
+You can specify the following options when defining a synthetic monitor in any location.
+These options are the same for all monitors. Each monitor type has additional configuration
+options that are specific to that monitor type.
+
+{/* Reference table */}
+
+
+
+
+### HTTP options
+
+The options described here configure Synthetics to connect via HTTP and
+optionally verify that the host returns the expected response.
+
+Valid options for HTTP monitors include all common options
+and the following HTTP-specific options:
+
+{/* Reference table */}
+
+
+
+
+### ICMP options
+
+The options described here configure Synthetics to use ICMP (v4 and v6) Echo
+Requests to check the configured hosts. On most platforms you must execute
+Synthetics with elevated permissions to perform ICMP pings.
+
+On Linux, regular users may perform pings if the right file capabilities are set. Run
+`sudo setcap cap_net_raw+eip /path/to/heartbeat` to grant Synthetics ping capabilities on Linux.
+Alternatively, you can grant ping permissions to the user being used to run Synthetics.
+To grant ping permissions in this way, run `sudo sysctl -w net.ipv4.ping_group_range='myuserid myuserid'`.
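+
+For example, on Linux (the binary path and IDs are placeholders):
+
+```sh
+# Option 1: grant the raw-socket capability to the binary that performs the pings
+sudo setcap cap_net_raw+eip /path/to/heartbeat
+
+# Option 2: allow unprivileged pings for a group range that includes your user
+sudo sysctl -w net.ipv4.ping_group_range='myuserid myuserid'
+```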
+
+Other platforms may require Synthetics to run as root or administrator to execute pings.
+
+Valid options for ICMP monitors include all common options
+and the following ICMP-specific options:
+
+{/* Reference table */}
+
+
+
+
+### TCP options
+
+The options described here configure Synthetics to connect via TCP and
+optionally verify the endpoint by sending and/or receiving a custom payload.
+
+Valid options for TCP monitors include all common options
+and the following TCP-specific options:
+
+{/* Reference table */}
+
+
+
+
+### Data types reference
+
+Values of configuration settings are interpreted as required by Synthetics.
+If a value can't be correctly interpreted as the required type (for example, a
+string is given when a number is required), Synthetics will fail to start up.
+
+
+
+#### Boolean
+
+Boolean values can be either `true` or `false`. Alternative names for `true` are
+`yes` and `on`. Instead of `false`, the values `no` and `off` can be used.
+
+```yaml
+enabled: true
+disabled: false
+```
+
+
+
+#### Number
+
+Number values require you to enter the number _without_ single or
+double quotes.
+
+```yaml
+integer: 123
+negative: -1
+float: 5.4
+```
+
+
+Some settings only support a restricted number range.
+
+
+
+
+#### String
+
+In [YAML](http://www.yaml.org), multiple styles of string definitions are supported:
+double-quoted, single-quoted, and unquoted.
+
+The double-quoted style is specified by surrounding the string with `"`. This
+style provides support for escaping unprintable characters using `\`, but comes
+at the cost of having to escape `\` and `"` characters.
+
+The single-quoted style is specified by surrounding the string with `'`. This
+style supports no escaping (use `''` to quote a single quote). Only printable
+characters can be used when using this form.
+
+Unquoted style requires no quotes, but does not support any escaping and can't
+include any symbol that has a special meaning in YAML.
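+
+For example (illustrative values):
+
+```yaml
+double_quoted: "first line\nsecond line"    # supports escapes; \ and " must themselves be escaped
+single_quoted: 'C:\logs\heartbeat'          # no escaping; use '' for a literal single quote
+unquoted: plain text with no special YAML characters
+```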
+
+
+Single-quoted style is recommended when defining regular expressions,
+event format strings, Windows file paths, or non-alphabetical symbolic characters.
+
+
+
+
+#### Duration
+
+Durations require a numeric value with an optional fraction and a required unit.
+Valid time units are `ns`, `us`, `ms`, `s`, `m`, `h`. Sometimes features based
+on durations can be disabled by using zero or negative durations.
+
+```yaml
+duration1: 2.5s
+duration2: 6h
+duration_disabled: -1s
+```
+
+
+
+#### Regular expression
+
+Regular expressions are special strings that are compiled into regular
+expressions at load time.
+
+Because both regular expressions and YAML use `\` for escaping
+characters in strings, it's highly recommended to use single-quoted strings when
+defining regular expressions. When single-quoted strings are used, the `\` character
+is not interpreted by the YAML parser as an escape symbol.
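+
+For example, a hypothetical HTTP monitor check that uses a single-quoted regular expression so the YAML parser leaves the `\` untouched:
+
+```yaml
+check.response.body: 'status:\s+(ok|green)'
+```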
diff --git a/docs/en/serverless/synthetics/synthetics-manage-monitors.mdx b/docs/en/serverless/synthetics/synthetics-manage-monitors.mdx
new file mode 100644
index 0000000000..0395924e2e
--- /dev/null
+++ b/docs/en/serverless/synthetics/synthetics-manage-monitors.mdx
@@ -0,0 +1,99 @@
+---
+id: serverlessObservabilitySyntheticsManageMonitors
+slug: /serverless/observability/synthetics-manage-monitors
+title: Manage monitors
+# description: Description to be written
+tags: []
+---
+
+
+
+import SyntheticsManageMonitorsUpdateMonitorWidget from '../transclusion/synthetics/tab-widgets/manage-monitors-update-monitor-widget.mdx'
+import SyntheticsManageMonitorsDeleteMonitorWidget from '../transclusion/synthetics/tab-widgets/manage-monitors-delete-monitor-widget.mdx'
+
+
+
+After you've created a synthetic monitor,
+you'll need to manage that monitor over time. This might include updating
+or permanently deleting an existing monitor.
+
+
+ If you're using a Synthetics project to manage monitors, you should also set up a workflow that uses
+ best practices for managing monitors effectively
+ in a production environment.
+
+
+
+
+## Update a monitor
+
+You can update a monitor's configuration, for example, changing the interval at which
+the monitor runs a test.
+
+You can also update the journey used in a browser monitor.
+For example, if you update the UI used in your application, you may want to update
+your journey's selectors and assertions.
+
+
+
+
+
+## Delete a monitor
+
+Eventually you might want to delete a monitor altogether,
+for example, if the user journey you were validating no longer exists.
+
+
+
+
+
+Alternatively, you can temporarily disable a monitor by updating the monitor's
+configuration in your journey's code or in the Synthetics UI using the _Enabled_ toggle.
+
+
+
+## Implement best practices for Synthetics projects
+
+
+This is only relevant to monitors created using a Synthetics project.
+
+
+After you've set up a Synthetics project,
+there are some best practices you can implement to manage the Synthetics project effectively.
+
+
+
+### Use version control
+
+First, it's recommended that you version control all files in Git.
+If your Synthetics project is not already in a version-controlled directory, add it to one
+and push it to your Git host.
+
+
+
+### Set up recommended workflow
+
+While it can be convenient to run the `push` command directly from your workstation,
+especially when setting up a new Synthetics project, it is not recommended for production environments.
+
+Instead, we recommend that you:
+
+1. Develop and test changes locally.
+1. Create a pull request for all config changes.
+1. Have your CI service automatically verify the PR by running `npx @elastic/synthetics .`
+
+ Elastic's synthetics runner can output results in a few different formats,
+ including JSON and JUnit (the standard format supported by most CI platforms).
+
+ If any of your journeys fail, it will yield a non-zero exit code, which most CI systems pick up as a failure by default.
+
+1. Have a human approve the pull request.
+1. Merge the pull request.
+1. Have your CI service automatically deploy the change by running `npx @elastic/synthetics push` after changes are merged.
+
+The exact implementation details will depend on the CI system and Git host you use.
+You can reference the sample GitHub configuration file that is included in the `.github`
+directory when you create a new Synthetics project.
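+
+As a rough illustration, a GitHub Actions workflow following this pattern might look like the
+sketch below. The job names, Node.js version, `SYNTHETICS_API_KEY` secret, and exact `push`
+arguments are assumptions to adapt to your own setup:
+
+```yaml
+name: synthetics
+on:
+  pull_request:
+  push:
+    branches: [main]
+
+jobs:
+  verify:
+    if: github.event_name == 'pull_request'
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-node@v4
+        with:
+          node-version: 18
+      - run: npm ci
+      # Run the journeys to verify the pull request before it is merged.
+      - run: npx @elastic/synthetics .
+
+  push-monitors:
+    if: github.event_name == 'push'
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-node@v4
+        with:
+          node-version: 18
+      - run: npm ci
+      # Deploy the merged changes. --auth expects a project API key stored as a repository secret.
+      - run: npx @elastic/synthetics push --auth "${{ secrets.SYNTHETICS_API_KEY }}"
+```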
+
+{/* or find an example in the */}
+{/* [elastic/synthetics-demo](https://github.com/elastic/synthetics-demo/blob/main/.github/workflows/run-synthetics.yml) repository. */}
diff --git a/docs/en/serverless/synthetics/synthetics-manage-retention.mdx b/docs/en/serverless/synthetics/synthetics-manage-retention.mdx
new file mode 100644
index 0000000000..9efe9633a0
--- /dev/null
+++ b/docs/en/serverless/synthetics/synthetics-manage-retention.mdx
@@ -0,0 +1,51 @@
+---
+id: serverlessObservabilitySyntheticsManageRetention
+slug: /serverless/observability/synthetics-manage-retention
+title: Manage data retention
+# description: Description to be written
+tags: []
+---
+
+
+
+When you set up a synthetic monitor, data from the monitor is saved in
+[((es)) data streams](((ref))/data-streams.html),
+an append-only structure in ((es)).
+
+Synthetic monitors record data to six data streams: `http`, `tcp`, `icmp`, `browser`, `browser.network`, and `browser.screenshot`.
+Elastic retains data from each data stream for a default time period
+that varies by data stream.
+If you want to reduce the amount of storage required or store data for longer,
+you can customize how long to retain data for each data stream.
+
+## Synthetics data streams
+
+There are six data streams recorded by synthetic monitors:
+
+| Data stream | Data includes | Default retention period |
+|---|---|---|
+| `http` | The URL that was checked, the status of the check, and any errors that occurred | 1 year |
+| `tcp` | The URL that was checked, the status of the check, and any errors that occurred | 1 year |
+| `icmp` | The URL that was checked, the status of the check, and any errors that occurred | 1 year |
+| `browser` | The URL that was checked, the status of the check, and any errors that occurred | 1 year |
+| `browser.screenshot` | Binary image data used to construct a screenshot and metadata with information related to de-duplicating this data | 14 days |
+| `browser.network` | Detailed metadata around requests for resources required by the pages being checked | 14 days |
+
+All types of checks record core metadata.
+Browser-based checks store two additional types of data: network and screenshot documents.
+These browser-specific indices are usually many times larger than the core metadata.
+The relative sizes of each vary depending on the sites being
+checked, with network data usually being the larger of the two by a significant factor.
+
+## Customize data stream lifecycles
+
+If Synthetics browser data streams are storing data longer than necessary,
+you can opt to retain data for a shorter period.
+
+To find Synthetics data streams:
+
+1. Navigate to **Project settings** → **Management** → **Index Management** → **Data Streams**.
+1. Filter the list of data streams for those containing the term `synthetics`.
+ 1. In the UI there will be three types of browser data streams: `synthetics-browser-*`, `synthetics-browser.network-*`, and `synthetics-browser.screenshot-*`.
+
+Then, you can refer to [Tutorial: Customize data retention for integrations](((fleet-guide))/data-streams-ilm-tutorial.html) to learn how to apply a custom ((ilm-init)) policy to the browser data streams.
diff --git a/docs/en/serverless/synthetics/synthetics-monitor-use.mdx b/docs/en/serverless/synthetics/synthetics-monitor-use.mdx
new file mode 100644
index 0000000000..a638d8d782
--- /dev/null
+++ b/docs/en/serverless/synthetics/synthetics-monitor-use.mdx
@@ -0,0 +1,65 @@
+---
+id: serverlessObservabilitySyntheticsMonitorUse
+slug: /serverless/observability/synthetics-monitor-use
+title: Configure individual browser monitors
+# description: Description to be written
+tags: []
+---
+
+
+
+
+
+
+
+This is only relevant for monitors that are created and managed using a Synthetics project.
+For more information on configuring browser monitors added in the UI,
+refer to Create monitors in the Synthetics UI.
+
+
+
+After writing synthetic journeys, you can use `monitor.use`
+to configure the browser monitors that will run your tests.
+
+You'll need to set a few configuration options:
+
+* **Give your monitor a name.** Provide a human-readable name and a unique ID for the monitor. These will appear in your Observability project, where you can view and manage monitors after they're created.
+* **Set the schedule.** Specify the interval at which your tests will run.
+* **Specify where the monitors should run.** You can run monitors on Elastic's global managed testing infrastructure
+ or create a ((private-location)) to run monitors from your own premises.
+
+* **Set other options as needed.** There are several other options you can set to customize your implementation, including params, tags, screenshot options, throttling options, and more.
+
+Configure each monitor directly in your `journey` code using `monitor.use`.
+The `monitor` API allows you to set unique options for each journey's monitor directly through code.
+For example:
+
+```js
+import { journey, step, monitor, expect } from '@elastic/synthetics';
+
+journey('Ensure placeholder is correct', ({ page, params }) => {
+ monitor.use({
+ id: 'example-monitor',
+ schedule: 10,
+ throttling: {
+ download: 10,
+ upload: 5,
+ latency: 100,
+ },
+ });
+ step('Load the demo page', async () => {
+ await page.goto('https://elastic.github.io/synthetics-demo/');
+ });
+ step('Assert placeholder text', async () => {
+ const placeholderValue = await page.getAttribute(
+ 'input.new-todo',
+ 'placeholder'
+ );
+ expect(placeholderValue).toBe('What needs to be done?');
+ });
+});
+```
+
+For each journey, you can specify its `schedule` and the `locations` in which it runs.
+When those options are not set, Synthetics will use the default values in the global configuration file.
+For more details, refer to Configure a Synthetics project.
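+
+As a rough sketch, such defaults might live in your `synthetics.config.ts` under a `monitor` key
+(the location ID shown is a placeholder; use the IDs available to your project):
+
+```js
+export default {
+  monitor: {
+    // Used by any journey that does not override these values with monitor.use()
+    schedule: 10,
+    locations: ['us_east'],
+  },
+};
+```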
diff --git a/docs/en/serverless/synthetics/synthetics-params-secrets.mdx b/docs/en/serverless/synthetics/synthetics-params-secrets.mdx
new file mode 100644
index 0000000000..9f7d7492a0
--- /dev/null
+++ b/docs/en/serverless/synthetics/synthetics-params-secrets.mdx
@@ -0,0 +1,180 @@
+---
+id: serverlessObservabilitySyntheticsParamsSecrets
+slug: /serverless/observability/synthetics-params-secrets
+title: Work with params and secrets
+# description: Description to be written
+tags: []
+---
+
+
+
+{/* lint disable params */}
+
+
+Params allow you to use dynamically defined values in your synthetic monitors.
+For example, you may want to test a production website with a particular
+demo account whose password is only known to the team managing the synthetic monitors.
+
+For more information about security-sensitive use cases, refer to .
+
+
+
+## Define params
+
+Param values can be declared by any of the following methods:
+
+* Setting them in the _Global parameters_ tab of the Synthetics Settings page in an Observability project.
+* Declaring a default value for the parameter in a configuration file.
+* Passing the `--params` CLI argument.
+
+
+If you are creating and managing synthetic monitors using a
+Synthetics project, you can also use regular environment
+variables via the standard node `process.env` global object.
+
+
+Param values are read and merged in the following order:
+
+1. **Global parameters in an Observability project**: The _Global parameters_ set using the
+ Observability project's UI are read first.
+1. **Configuration file**: Then the _Global parameters_ are merged with any parameters defined in a configuration file.
+ If a parameter is defined in both the Observability project **and** a Synthetics project configuration file,
+ the value in the configuration file will be used.
+1. **CLI**: Then the parameters defined in the configuration file are merged with any parameters passed to the CLI `--params` argument.
+ If a parameter is defined in a Synthetics project configuration file **and** using the CLI argument,
+ the value defined using the CLI will be used.
+ When running a script using the CLI, _Global parameters_ defined in the Observability project have no impact
+ on the test because it won't have access to the Observability project.
+
+### Global parameters in your Observability project
+
+From any page in the Observability project's **Synthetics** section:
+
+1. Go to **Settings**.
+1. Go to the **Global parameters** tab.
+1. Define parameters.
+
+![Global parameters tab on the Synthetics Settings page in an Observability project](../images/synthetics-params-secrets-kibana-define.png)
+
+
+
+### Synthetics project config file
+
+Use a `synthetics.config.js` or `synthetics.config.ts` file to define variables required by your tests.
+This file should be placed in the root of your Synthetics project.
+
+```js
+export default (env) => {
+ let my_url = "http://localhost:8080";
+ if (env === "production") {
+ my_url = "https://elastic.github.io/synthetics-demo/"
+ }
+ return {
+ params: {
+ my_url,
+ },
+ };
+};
+```
+
+The example above uses the `env` variable, which corresponds to the value of the `NODE_ENV` environment variable.
+
+
+
+### CLI argument
+
+To set parameters when running `npx @elastic/synthetics` on the command line,
+use the `--params` or `-p` flag. The provided map is merged over any existing variables defined in the `synthetics.config.{js,ts}` file.
+
+For example, to override the `my_url` parameter, you would run:
+
+```sh
+npx @elastic/synthetics . --params '{"my_url": "http://localhost:8080"}'
+```
+
+
+
+## Use params
+
+You can use params in both lightweight and browser monitors created in
+either a Synthetics project or the Synthetics UI in your Observability project.
+
+### In a Synthetics project
+
+For lightweight monitors in a Synthetics project, wrap the name of the param in `${}` (for example, `${my_url}`).
+
+```yaml
+- type: http
+ name: Todos Lightweight
+ id: todos-lightweight
+ urls: ["${my_url}"]
+ schedule: '@every 1m'
+```
+
+In browser monitors, parameters can be referenced via the `params` property available within the
+argument to a `journey`, `before`, `beforeAll`, `after`, or `afterAll` callback function.
+
+Add `params.` before the name of the param (for example, `params.my_url`):
+
+```js
+beforeAll(({params}) => {
+ console.log(`Visiting ${params.my_url}`)
+})
+
+journey("My Journey", ({ page, params }) => {
+ step('launch app', async () => {
+ await page.goto(params.my_url) [^1]
+ })
+})
+```
+[^1]: If you are using TypeScript, replace `params.my_url` with `params.my_url as string`.
+
+
+
+### In the UI
+
+To use a param in a lightweight monitor that is created in the Synthetics UI,
+wrap the name of the param in `${}` (for example, `${my_url}`).
+
+![Use a param in a lightweight monitor created in the Synthetics UI](../images/synthetics-params-secrets-kibana-use-lightweight.png)
+
+To use a param in a browser monitor that is created in the Synthetics UI,
+add `params.` before the name of the param (for example, `params.my_url`).
+
+![Use a param in a browser monitor created in the Synthetics UI](../images/synthetics-params-secrets-kibana-use-browser.png)
+
+
+
+## Working with secrets and sensitive values
+
+Your synthetics scripts may require the use of passwords or other sensitive secrets that are not known until runtime.
+
+
+
+Params are viewable in plain text by administrators and other users with `all` privileges for
+the Synthetics app.
+Also note that synthetics scripts have no limitations on accessing these values, and a malicious script author could write a
+synthetics journey that exfiltrates `params` and other data at runtime.
+Do **not** use truly sensitive passwords (for example, an admin password or a real credit card)
+in **any** synthetics tools.
+Instead, set up limited demo accounts, or fake credit cards with limited functionality.
+If you want to limit access to parameters, ensure that users who are not supposed to access those values
+do not have `all` privileges for the Synthetics app, and that any scripts that use those values
+do not leak them in network requests or screenshots.
+
+
+
+If you are managing monitors with a Synthetics project, you can use environment variables
+in your `synthetics.config.ts` or `synthetics.config.js` file.
+
+The example below uses `process.env.MY_URL` to reference a variable named `MY_URL`
+defined in the environment and assigns its value to a param. That param can then
+be used in both lightweight and browser monitors that are managed in the Synthetics project:
+
+```js
+export default {
+ params: {
+ my_url: process.env.MY_URL
+ }
+};
+```
diff --git a/docs/en/serverless/synthetics/synthetics-private-location.mdx b/docs/en/serverless/synthetics/synthetics-private-location.mdx
new file mode 100644
index 0000000000..ce5e1ad7da
--- /dev/null
+++ b/docs/en/serverless/synthetics/synthetics-private-location.mdx
@@ -0,0 +1,184 @@
+---
+id: serverlessObservabilitySyntheticsPrivateLocation
+slug: /serverless/observability/synthetics-private-location
+title: Monitor resources on private networks
+# description: Description to be written
+tags: []
+---
+
+
+
+
+
+To monitor resources on private networks you can either:
+
+* Allow Elastic's global managed infrastructure to access your private endpoints.
+* Use ((agent)) to create a ((private-location)).
+
+((private-location))s running on ((agent)) require only outbound connections from your network,
+while allowing Elastic's global managed infrastructure to access a private endpoint requires
+inbound access, which poses an additional risk that you must assess.
+
+
+
+## Allow access to your private network
+
+To give Elastic's global managed infrastructure access to a private endpoint, use IP address filtering, HTTP authentication, or both.
+
+To grant access via IP, use [this list of egress IPs](https://manifest.synthetics.elastic-cloud.com/v1/ip-ranges.json).
+The addresses and locations on this list may change, so automating updates to
+filtering rules is recommended. IP filtering alone allows all users of Elastic's global managed infrastructure access to your endpoints.
+If this is a concern, consider adding protection such as username/password authentication through a proxy like NGINX.
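+
+For example, you could fetch the current list with a small script like the sketch below
+(assuming `curl` and `jq` are available; inspect the JSON structure before automating
+firewall or allowlist updates against it):
+
+```sh
+curl -s https://manifest.synthetics.elastic-cloud.com/v1/ip-ranges.json | jq .
+```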
+
+
+
+## Monitor via a private agent
+
+((private-location))s allow you to run monitors from your own premises.
+Before running a monitor on a ((private-location)), you'll need to:
+
+* Set up ((agent)).
+* Connect ((fleet)) to your Observability project and enroll an ((agent)) in ((fleet)).
+* Add a ((private-location)) in the Synthetics UI.
+
+
+
+((private-location))s running through ((agent)) must have a direct connection to ((es)).
+Do not configure any ingest pipelines or output via Logstash, as this will prevent Synthetics from working properly and is not supported.
+
+
+
+
+
+## Set up ((agent))
+
+Start by setting up ((agent)) and creating an agent policy. For more information on agent policies and creating them, refer to [((agent)) policy](((fleet-guide))/agent-policy.html#create-a-policy).
+
+
+
+A ((private-location)) should be set up against an agent policy that runs on a single ((agent)).
+The ((agent)) must be **enrolled in Fleet** (((private-location))s cannot be set up using **standalone** ((agents))).
+Do _not_ run the same agent policy on multiple agents being used for ((private-location))s, as you may
+end up with duplicate or missing tests. ((private-location))s do not currently load balance tests across
+multiple ((agents)). See Scaling ((private-location))s for information on increasing the capacity
+within a ((private-location)).
+
+By default, ((private-location))s are configured to allow two simultaneous browser tests and an unlimited number of lightweight checks.
+As a result, if more than two browser tests are assigned to a particular ((private-location)), there may be a delay before they all run.
+
+
+
+
+
+## Connect to your Observability project
+
+After setting up ((fleet)), you'll connect ((fleet)) to your Observability project
+and enroll an ((agent)) in ((fleet)).
+
+
+
+Elastic provides Docker images that you can use to run ((fleet)) and an ((agent)) more easily.
+For monitors running on ((private-location))s, you _must_ use the `elastic-agent-complete`
+Docker image to create a self-hosted ((agent)) node. The standard ((ecloud)) or self-hosted
+((agent)) will not work.
+
+
+
+Version ((version)) has not yet been released.
+
+
+
+
+
+To pull the Docker image, run:
+
+```sh
+docker pull docker.elastic.co/beats/elastic-agent-complete:((version))
+```
+
+
+
+Then enroll and run an ((agent)).
+You'll need an enrollment token and the URL of the ((fleet-server)).
+You can use the default enrollment token for your policy or create new policies
+and [enrollment tokens](((fleet-guide))/fleet-enrollment-tokens.html) as needed.
+
+For more information on running ((agent)) with Docker, refer to
+[Run ((agent)) in a container](((fleet-guide))/elastic-agent-container.html).
+
+
+
+Version ((version)) has not yet been released.
+
+
+
+
+
+```sh
+docker run \
+ --env FLEET_ENROLL=1 \
+ --env FLEET_URL={fleet_server_host_url} \
+ --env FLEET_ENROLLMENT_TOKEN={enrollment_token} \
+ --cap-add=NET_RAW \
+ --cap-add=SETUID \
+ --rm docker.elastic.co/beats/elastic-agent-complete:((version))
+```
+
+
+
+
+
+The `elastic-agent-complete` Docker image requires additional capabilities to operate correctly. Ensure
+`NET_RAW` and `SETUID` are enabled on the container.
+
+
+
+
+
+You may need to set other environment variables.
+Learn how in [((agent)) environment variables guide](((fleet-guide))/agent-environment-variables.html).
+
+
+
+
+
+## Add a ((private-location))
+
+When the ((agent)) is running you can add a new ((private-location)) in your Observability project's **Synthetics** section:
+
+1. Go to **Settings**.
+1. Go to the **((private-location))s** tab.
+1. Click **Add location**.
+1. Give your new location a unique _Location name_ and select the _Agent policy_ you created above.
+1. Click **Save**.
+
+
+ It is not currently possible to use custom CAs for synthetics browser tests in private locations without following a workaround.
+ To learn more about the workaround, refer to the following GitHub issue:
+ [elastic/synthetics#717](https://github.com/elastic/synthetics/issues/717).
+
+
+
+
+## Scaling ((private-location))s
+
+By default, ((private-location))s are configured to allow two simultaneous browser tests and an unlimited number of lightweight checks.
+You can change these limits by setting the environment variables `SYNTHETICS_LIMIT_{TYPE}`, where `{TYPE}` is one of `BROWSER`, `HTTP`, `TCP`, or `ICMP`,
+on the container running the ((agent)) Docker image.
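+
+For example, building on the `docker run` command shown earlier, you could raise the browser
+limit for a ((private-location)) like this (the value `4` is illustrative; size it to your hardware):
+
+```sh
+docker run \
+  --env FLEET_ENROLL=1 \
+  --env FLEET_URL={fleet_server_host_url} \
+  --env FLEET_ENROLLMENT_TOKEN={enrollment_token} \
+  --env SYNTHETICS_LIMIT_BROWSER=4 \
+  --cap-add=NET_RAW \
+  --cap-add=SETUID \
+  --rm docker.elastic.co/beats/elastic-agent-complete:((version))
+```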
+
+It is critical to allocate enough memory and CPU capacity to handle configured limits.
+Start by allocating at least 2 GiB of memory and two cores per browser instance to ensure consistent
+performance and avoid out-of-memory errors. Then adjust as needed. Resource requirements will vary depending on workload.
+Much less memory is needed for lightweight monitors. Start by allocating at least 512 MiB of memory and two cores for
+lightweight checks. Then increase allocated memory and CPU based on observed usage patterns.
+
+These limits are for simultaneous tests, not total tests. For example, if
+60 browser tests were scheduled to run once per hour and each took 1 minute to run, that would fully occupy one execution slot.
+However, it is a good practice to set up execution slots with extra capacity. A good starting point would be to over-allocate by
+a factor of 5. In the previous example, that would mean allocating 5 slots.
+
+
+
+## Next steps
+
+Now you can add monitors to your ((private-location)) in the Synthetics UI or using the Elastic Synthetics library's `push` method.
diff --git a/docs/en/serverless/synthetics/synthetics-recorder.mdx b/docs/en/serverless/synthetics/synthetics-recorder.mdx
new file mode 100644
index 0000000000..840bf6b28a
--- /dev/null
+++ b/docs/en/serverless/synthetics/synthetics-recorder.mdx
@@ -0,0 +1,150 @@
+---
+id: serverlessObservabilitySyntheticsRecorder
+slug: /serverless/observability/synthetics-recorder
+title: Use the Synthetics Recorder
+# description: Description to be written
+tags: []
+---
+
+
+
+
+
+
+As with any script recording technology, the Elastic Synthetics Recorder should be used as a tool to help create the main structure of the script. For simpler sites, you may be able to use the Synthetics Recorder's output directly to create a synthetic monitor, but for more complex and dynamic sites, or to limit flakiness, you'll likely need to edit its output before using it to create a monitor.
+
+
+You can use the Synthetics Recorder to write a synthetic test by interacting with a web page and exporting journey code that reflects all the actions you took.
+
+![Elastic Synthetics Recorder after recording a journey and clicking Export](../images/synthetics-create-test-script-recorder.png)
+
+
+
+## Set up
+
+For information on how to download the Elastic Synthetics Recorder, go to the [download page](https://github.com/elastic/synthetics-recorder/blob/main/docs/DOWNLOAD.md).
+
+
+
+## Record a journey
+
+To record a journey:
+
+1. Enter a starting URL in the search box. This URL will be the starting point of the journey script the recorder will create.
+1. Click **Start** or press Enter on your keyboard. This will launch a Chromium window open to the page you specified and start recording.
+1. Start interacting with the browser. This can include clicking on text, navigation, focusing on inputs like buttons and text fields, and more.
+ 1. (Optional) You can click **Pause** to temporarily stop recording actions while you continue to interact with the browser. Click **Pause** again to resume recording. Note: It's especially important to test the journey if you paused recording at any point.
+1. When you're done interacting with the browser window, click **Stop** or close the browser to stop recording.
+
+
+
+## Edit a journey
+
+Once you've started recording, you can use the Synthetics Recorder UI to edit steps and individual actions before generating the journey code.
+You can also edit the journey after you've stopped recording.
+
+
+
+### Name steps
+
+Naming steps can help make the resulting journey code easier to understand.
+If you provide a step name, the name will be used in both the UI and the resulting code.
+If you don't name steps, the UI will show "Step 1", "Step 2", and so on, and the resulting code will use the first action in the step as the step text.
+
+To edit a step name:
+
+1. Hover over the current step name and click the pencil icon that appears.
+1. Edit the text in the text box.
+1. Press Return or Enter on your keyboard to save the updated name.
+
+
+
+### Split into multiple steps
+
+Steps represent groups of actions that should be completed in a specific order.
+Breaking a journey into steps can make it easier to read the resulting code.
+It can also make it easier to interpret results since each step is
+displayed individually in the Synthetics UI along with screenshots for convenient debugging and error tracking.
+
+By default, the Synthetics Recorder will group all actions in a single step,
+but you can break actions into any number of steps.
+
+To add a step:
+
+1. Click the plus icon between two actions to create a new step.
+1. (Optional) Consider naming the step.
+
+Use the trash can icon to delete the step divider, adding the actions from the deleted step into the previous step.
+
+
+
+### Edit or delete recorded actions
+
+You can fine-tune a journey by editing actions that were generated by the recorder.
+You can't change the type of command (for example, "click" or "navigate"), but you can change the value that is passed to the command.
+
+To edit an action:
+
+1. Hover over an action and click the pencil icon that appears.
+1. Edit the value as needed.
+1. Click **Save**.
+
+To delete an action:
+
+1. Hover over the action you want to delete and click the three dots for more options.
+1. Click **Delete action**.
+
+
+If you changed or deleted any actions, it's especially important to test the journey to ensure it still works.
+
+
+
+
+### Add assertions
+
+Assertions can play an important role in effective synthetic journeys by making determinations about the state of the page you are testing.
+This can include checking if an element is visible or checking the contents of a text field.
+You can't generate an assertion just from interacting with the browser window.
+Instead, you can add assertions between generated actions.
+
+To add an assertion:
+
+1. Find the generated action that should be done right before you want to assert a condition.
+1. Hover over that action and click the three dots for more options.
+1. Click **Add assertion**. This will add a new "assert" action in the UI.
+1. Provide the type of assertion, selector, and value.
+1. Click **Save**.
+
+
+If you added any assertions after you finished recording, it's especially important to test the journey to ensure it still works.
+
+
+
+
+## Test the journey
+
+At any point during the recording process or after it concludes, you can test your script.
+
+When you click the **Test** button, Elastic Synthetics will run the journey.
+As the test runs, the recorder will display results on a per-step basis.
+If there are any errors that prevent the journey from running, the recorder will display the relevant error message to help you debug.
+
+
+If you paused recording, updated actions, or added assertions manually in the recorder, it is especially important that you test the journey to verify that the actions work in sequence.
+
+
+
+
+## Export
+
+When you are satisfied with the journey you've created, you can export it from the recorder.
+
+Click **Export** to view the final journey code.
+From there you can use the code by:
+
+* Copying and pasting the code containing all steps into a new or existing Synthetics project or an inline monitor.
+* Clicking **Export** to save a JavaScript file containing all steps.
+
+You can also check **Export as project** and either copy and paste the code or click **Export**
+to get the full journey code, including the `journey` definition and imports for all dependencies.
+
diff --git a/docs/en/serverless/synthetics/synthetics-scale-and-architect.mdx b/docs/en/serverless/synthetics/synthetics-scale-and-architect.mdx
new file mode 100644
index 0000000000..42a71e2995
--- /dev/null
+++ b/docs/en/serverless/synthetics/synthetics-scale-and-architect.mdx
@@ -0,0 +1,27 @@
+---
+id: serverlessObservabilitySyntheticsScaleAndArchitect
+slug: /serverless/observability/synthetics-scale-and-architect
+title: Scale and architect a Synthetics deployment
+# description: Description to be written
+tags: []
+---
+
+
+
+
+
+Keep these advanced considerations in mind when using Elastic Synthetics
+for large and complex use cases.
+
+
+
+## Manage large numbers of Synthetic monitors with tags
+
+When managing larger numbers of synthetic monitors, use tags to keep them organized.
+Many of the views in the Synthetics UI are tag-aware and can group data by tag.
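+
+For example, a lightweight monitor defined in a Synthetics project can carry tags in its
+configuration (a rough sketch; the monitor name, URL, and tag values are illustrative):
+
+```yaml
+- type: http
+  name: Checkout API
+  id: checkout-api
+  urls: ["https://example.com/api/checkout"]
+  schedule: '@every 1m'
+  tags: ["team-payments", "production"]
+```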
+
+
+
+## Create custom dashboards
+
+If we don't provide a UI for your exact needs, you can use dashboards to build custom visualizations.
diff --git a/docs/en/serverless/synthetics/synthetics-security-encryption.mdx b/docs/en/serverless/synthetics/synthetics-security-encryption.mdx
new file mode 100644
index 0000000000..2871abfb40
--- /dev/null
+++ b/docs/en/serverless/synthetics/synthetics-security-encryption.mdx
@@ -0,0 +1,34 @@
+---
+id: serverlessObservabilitySyntheticsSecurityEncryption
+slug: /serverless/observability/synthetics-security-encryption
+title: Synthetics Encryption and Security
+# description: Description to be written
+tags: []
+---
+
+
+
+
+
+Elastic Synthetics was designed with security in mind, encrypting both persisted and transmitted data.
+This page catalogs the points within Elastic Synthetics where data is either stored or transmitted in an encrypted fashion.
+
+## Synthetics UI
+
+Data is stored in [Kibana Secure Saved Objects](((kibana-ref))/xpack-security-secure-saved-objects.html),
+with sensitive fields encrypted. These fields include your script source, params, and global params.
+
+
+
+## Synthetics Service
+
+The Global Elastic Synthetics Service performs all communication of sensitive data (both internally, and with Kibana) over encrypted connections
+and encrypts all data persisted to disk as well.
+
+
+
+## Synthetics Private Locations
+
+In Kibana, configuration for private locations is stored in two places: in Synthetics saved objects, which always encrypt sensitive fields using [Kibana Secure Saved Objects](((kibana-ref))/xpack-security-secure-saved-objects.html), and in Fleet, which uses unencrypted saved objects restricted by user permissions. For Elastic Cloud customers, all data is secured on disk regardless of whether additional saved object encryption is present. See our [Cloud Security Statement](https://www.elastic.co/cloud/security) for more information. We recommend that self-managed customers encrypt disks for their Elasticsearch instances if this is a concern.
+
+All data is encrypted in transit. See [Elastic Agent configuration encryption](((fleet-guide))/_elastic_agent_configuration_encryption.html) for more details.
diff --git a/docs/en/serverless/synthetics/synthetics-settings.mdx b/docs/en/serverless/synthetics/synthetics-settings.mdx
new file mode 100644
index 0000000000..46609f117b
--- /dev/null
+++ b/docs/en/serverless/synthetics/synthetics-settings.mdx
@@ -0,0 +1,120 @@
+---
+id: serverlessObservabilitySyntheticsSettings
+slug: /serverless/observability/synthetics-settings
+title: Configure Synthetics settings
+# description: Description to be written
+tags: []
+---
+
+
+
+
+
+There are several Synthetics settings you can adjust in your Observability project.
+
+
+
+## Alerting
+
+Alerting enables you to detect complex conditions using **rules** across Observability apps
+and send a notification using **connectors**.
+
+When you create a new synthetic monitor, new default synthetics rules will be applied.
+To edit the default rules:
+
+1. Click **Alerts and rules** in the top bar.
+1. Select a rule to open a panel where you can edit the rule's configuration:
+ * **Monitor status rule** for receiving notifications for errors and outages.
+ * **TLS certificate rule** for receiving notifications when one or more of your HTTP or TCP
+ lightweight monitors has a TLS certificate expiring within a specified threshold or when
+ it exceeds an age limit.
+
+However, the automatically created Synthetics internal alert is intentionally preconfigured,
+and some configuration options can't be changed.
+For example, you can't change how often the rule checks its conditions.
+
+If you need specific alerting behavior, set up a different rule.
+To view all existing rules or create a new rule:
+
+1. Click **Alerts and rules** in the top bar.
+1. Click **Manage rules** to go to the _Rules_ page.
+
+On the _Rules_ page, you can manage the default synthetics rules including snoozing rules,
+disabling rules, deleting rules, and more.
+
+![Rules page with default Synthetics rules](../images/synthetics-settings-disable-default-rules.png)
+
+
+
+You can enable and disable default alerts for individual monitors in a few ways:
+
+* In the UI when you create a monitor.
+* In the UI _after_ a monitor is already created, on the **Monitors** page
+ or on the **Edit monitor** page for the monitor.
+* In a Synthetics project when configuring a lightweight monitor.
+
+
+
+In the **Alerting** tab on the Synthetics Settings page, you can add and configure connectors.
+If you are running in Elastic Cloud, an SMTP connector is automatically configured,
+allowing you to easily set up email alerts.
+Read more about all available connectors in Action types.
+
+![Alerting tab on the Synthetics Settings page in an Observability project](../images/synthetics-settings-alerting.png)
+
+
+
+## ((private-location))s
+
+((private-location))s allow you to run monitors from your own premises.
+
+In the **((private-location))s** tab, you can add and manage ((private-location))s.
+After you Set up ((agent)) and Connect to your Observability project,
+this is where you will add the ((private-location)) so you can specify it as the location for
+a monitor created using the Synthetics UI or a Synthetics project.
+
+![((private-location))s tab on the Synthetics Settings page in an Observability project](../images/synthetics-settings-private-locations.png)
+
+
+
+## Global parameters
+
+Global parameters can be defined once and used across the configuration of lightweight and browser-based monitors.
+
+In the **Global parameters** tab, you can define variables and parameters.
+This is one of several methods you can use to define variables and parameters.
+To learn more about the other methods and which methods take precedence over others, see Work with params and secrets.
+
+![Global parameters tab on the Synthetics Settings page in an Observability project](../images/synthetics-settings-global-parameters.png)
+
+
+
+## Data retention
+
+When you set up a synthetic monitor, data from the monitor is saved in [Elasticsearch data streams](((ref))/data-streams.html),
+an append-only structure in Elasticsearch.
+You can customize how long synthetics data is stored by creating your own index lifecycle policy
+and attaching it to the relevant custom Component Template in Stack Management.
+
+In the **Data retention** tab, use the links to jump to the relevant policy for each data stream.
+Learn more about the data included in each data stream in Manage data retention.
+
+![Data retention tab on the Synthetics Settings page in an Observability project](../images/synthetics-settings-data-retention.png)
+
+
+
+## Project API keys
+
+Project API keys are used to push monitors created and managed in a Synthetics project remotely from a CLI or CD pipeline.
+
+In the **Project API keys** tab, you can generate API keys to use with your Synthetics project.
+Learn more about using API keys in Create monitors with a Synthetics project.
+
+
+
+To create a Project API key, you must be logged in as a user with
+Editor access.
+
+
+
+![Project API keys tab on the Synthetics Settings page in an Observability project](../images/synthetics-settings-api-keys.png)
diff --git a/docs/en/serverless/synthetics/synthetics-troubleshooting.mdx b/docs/en/serverless/synthetics/synthetics-troubleshooting.mdx
new file mode 100644
index 0000000000..a50a5c9f20
--- /dev/null
+++ b/docs/en/serverless/synthetics/synthetics-troubleshooting.mdx
@@ -0,0 +1,138 @@
+---
+id: serverlessObservabilitySyntheticsTroubleshooting
+slug: /serverless/observability/synthetics-troubleshooting
+title: Troubleshooting Synthetics
+# description: Description to be written
+tags: []
+---
+
+
+
+import Support from '../transclusion/support.mdx'
+
+
+
+
+
+## Local debugging
+
+For debugging synthetic tests locally, you can set an environment variable,
+`DEBUG=synthetics`, to capture Synthetics agent logs when using the
+Synthetics CLI.
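+
+For example, to run the journeys in the current directory with agent logs enabled:
+
+```sh
+DEBUG=synthetics npx @elastic/synthetics .
+```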
+
+
+
+## Common issues
+
+
+
+### No results from a monitor configured to run on a ((private-location))
+
+If you have created a ((private-location)) and configured a monitor to run on that ((private-location)),
+but don't see any results for that monitor in the Synthetics UI, make sure there is an agent
+configured to run against the agent policy.
+
+
+
+If you attempt to assign an agent policy to a ((private-location)) _before_ configuring an agent to run
+against the agent policy, you will see a note in the Synthetics UI that the selected agent policy
+has no agents.
+
+
+
+When creating a ((private-location)), you have to:
+
+1. Set up ((agent)).
+1. Connect ((fleet)) to your Observability project and enroll an ((agent)) in ((fleet)).
+1. Add a ((private-location)) in the Synthetics UI.
+
+If you do not complete the second item, no agents will be configured to run against the agent policy, and
+any monitors configured to run on that ((private-location)) won't be able to run, so there will be no results
+in the Synthetics UI.
+
+To fix this, make sure there is an agent configured to run against the agent policy.
+
+
+
+### No results from a monitor
+
+If you have configured a monitor but don't see any results for that monitor in the Synthetics UI, whether running them from Elastic's global managed testing infrastructure or from ((private-location))s, ensure Synthetics has a direct connection to ((es)).
+
+Do not configure any ingest pipelines or output via Logstash as this will prevent Synthetics from working properly and is not supported.
+
+
+
+### Browser monitor configured to run on a ((private-location)) not running to schedule
+
+If you have browser monitors configured to run on a ((private-location)) but notice one or more of them are not running as scheduled, this could be because:
+
+* The time it takes for your monitor to run is longer than the frequency you have set.
+* There may be too many monitors trying to run concurrently, causing some of them to skip their scheduled run.
+
+You may also see a message in the logs such as `2 tasks have missed their schedule deadlines by more than 1 second in the last 15s`. These will be visible from inside the Agent diagnostic ZIP file, and the numbers and time periods may be different in your logs.
+
+Start by identifying the cause of the issue. First, check if the time it takes the monitor to run is less than the scheduled frequency:
+
+1. Go to the Synthetics UI.
+1. Click the monitor, then click **Go to monitor**.
+1. Go to the Overview tab to see the _Avg. duration_. You can also view the duration for individual runs in the History tab.
+1. Compare the duration to the scheduled frequency. If the duration is _greater than_ the scheduled frequency, for example, if a monitor takes 90 seconds to run and its scheduled frequency is 1 minute, the next scheduled run will not occur because the current one is still running, so you may see results for only every other scheduled run.
+
+To fix this, you can either:
+
+* Change the frequency so the monitor runs less often.
+* Refactor the monitor so it can run in a shorter amount of time.
+
+If the duration is _less than_ the scheduled frequency or the suggestion above does not fix the issue, then there may be too many browser monitors attempting to run on the ((private-location)). Due to the additional hardware overhead of running browser monitors, we limit each ((private-location)) to only run two browser monitors at the same time. Depending on how many browser monitors you have configured to run on the ((private-location)) and their schedule, the ((private-location)) may not be able to run them all because it would require more than two browser tests to be running simultaneously.
+
+To fix this issue, you can either:
+
+* Increase the number of concurrent browser monitors allowed (as described in Scaling Private Locations), paying attention to the scaling and hardware requirements documented.
+* Create multiple ((private-location))s and spread your browser monitors across them more evenly (effectively horizontally scaling your ((private-location))s).
+
+
+
+### No locations are available
+
+When using ((ecloud)), if there are no options available in the _Locations_ dropdown when you
+try to create a monitor in the Synthetics UI _or_ if no locations are listed when using the
+`location` command, it might be because you do not have permission to
+use Elastic managed locations _and_ there are no Private Locations
+available yet.
+
+There are a few ways to fix this:
+
+* If you have Editor access, you can create a new Private Location. Then try creating the monitor again.
+* If you do _not_ have the right privileges to create a Private Location, you can ask an Admin to create a Private Location or give you the necessary privileges so you can create a new Private Location. Then try creating the monitor again.
+{/* * If you want to create a monitor to run on Elastic's global managed infrastructure, ask an Admin to update `Synthetics and Uptime` sub-feature privileges for the role you're currently assigned. Then try creating the monitor again. */}
+
+
+
+{/* ### You do not have permission to use Elastic managed locations
+
+If you try to create or edit a monitor hosted on Elastic's global managed infrastructure but see a note that you do not have permission to use Elastic managed locations, an administrator has restricted the use of public locations.
+
+To fix this you can either:
+
+* Ask an Admin to update
+ `Synthetics and Uptime` sub-feature privileges for the role you're
+ currently assigned or assign you a role that allows using Elastic's global managed infrastructure.
+
+* Use a Private Location. */}
+
+
+
+## Get help
+
+
+
+### Elastic Support
+
+
+
+
+
+### Discussion forum
+
+For other questions and feature requests, visit our
+[discussion forum](((forum))/c/observability/synthetics/75).
diff --git a/docs/en/serverless/technical-preview-limitations.mdx b/docs/en/serverless/technical-preview-limitations.mdx
new file mode 100644
index 0000000000..fc3ecd036c
--- /dev/null
+++ b/docs/en/serverless/technical-preview-limitations.mdx
@@ -0,0 +1,11 @@
+---
+id: serverlessObservabilityTechnicalPreviewLimitations
+slug: /serverless/observability/observability-technical-preview-limitations
+title: Technical preview limitations
+description: Review the limitations that apply to Elastic Observability projects in technical preview.
+tags: [ 'serverless', 'observability' ]
+---
+
+
+
+Currently, the maximum ingestion rate for the Managed Intake Service (APM and OpenTelemetry ingest) is 11.5 MB/s of uncompressed data (roughly 1 TB/day uncompressed equivalent). Ingestion at a higher rate may experience rate limiting or ingest failures.
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/apm/guide/about/go.mdx b/docs/en/serverless/transclusion/apm/guide/about/go.mdx
new file mode 100644
index 0000000000..111e0c9253
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/about/go.mdx
@@ -0,0 +1,44 @@
+import APMAgentWarning from '../../../../partials/apm-agent-warning.mdx'
+
+**Elastic APM Go agent**
+
+The Elastic APM Go agent enables you to trace the execution of operations in your [Go](https://golang.org/)
+applications.
+It has built-in support for popular frameworks and toolkits,
+like [Gorilla](http://www.gorillatoolkit.org/) and [Gin](https://gin-gonic.com/),
+as well as support for instrumenting Go's built-in [net/http](https://golang.org/pkg/net/http/) package and
+[database/sql](https://golang.org/pkg/database/sql/) drivers.
+
+The Agent includes instrumentation modules for supported technologies,
+each providing middleware or wrappers for recording interesting events, such as incoming HTTP requests, outgoing HTTP requests, and database queries.
+
+To collect data about incoming HTTP requests, install router middleware for one of the supported web frameworks.
+Incoming requests will be recorded as transactions, along with any related panics or errors.
+
+To collect data for outgoing HTTP requests, instrument an `http.Client` or `http.Transport` using `module/apmhttp`.
+To collect data about database queries, use `module/apmsql`,
+which provides instrumentation for well-known database drivers.
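+
+As a rough sketch of what this looks like in code (module paths assume the v2 agent), wrapping an
+`http.Handler` with `module/apmhttp` is enough to record incoming requests as transactions:
+
+```go
+package main
+
+import (
+	"net/http"
+
+	"go.elastic.co/apm/module/apmhttp/v2"
+)
+
+func main() {
+	mux := http.NewServeMux()
+	mux.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
+		w.Write([]byte("hello"))
+	})
+
+	// apmhttp.Wrap records each incoming request as a transaction,
+	// along with any related panics or errors.
+	http.ListenAndServe(":8080", apmhttp.Wrap(mux))
+}
+```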
+
+In order to connect transactions with related spans and errors, and propagate traces between services (distributed tracing),
+the agent relies on Go's built-in [context](https://golang.org/pkg/context/) package:
+transactions and spans are stored in context objects.
+For example, for incoming HTTP requests, in-flight trace data will be recorded in the `context` object accessible through
+[net/http.Context](https://golang.org/pkg/net/http/#Request.Context).
+
+In addition to capturing events like those mentioned here,
+the agent also collects system and application metrics at regular intervals.
+This collection happens in a background goroutine that is automatically started when the agent is initialized.
+
+**Learn more**
+
+If you're ready to give Elastic APM a try, see Get started with traces and APM.
+
+See the [Go agent reference](((apm-go-ref))/introduction.html) for full documentation, including:
+
+* [Supported technologies](((apm-go-ref))/supported-tech.html)
+* [Set up](((apm-go-ref))/getting-started.html)
+* [Configuration reference](((apm-go-ref))/configuration.html)
+* [API reference](((apm-go-ref))/api.html)
+
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/apm/guide/about/java.mdx b/docs/en/serverless/transclusion/apm/guide/about/java.mdx
new file mode 100644
index 0000000000..bb7c5acbea
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/about/java.mdx
@@ -0,0 +1,26 @@
+import APMAgentWarning from '../../../../partials/apm-agent-warning.mdx'
+
+**Elastic APM Java agent**
+
+The Elastic APM Java agent auto-instruments supported technologies and records interesting events,
+like spans for database queries and transactions for incoming HTTP requests.
+To do this, it leverages the capability of the JVM to instrument the bytecode of classes.
+This means that for the supported technologies, there are no code changes required.
+
+Spans are grouped in transactions—by default, one for each incoming HTTP request.
+But it's possible to create custom transactions not associated with an HTTP request.
+Transactions and Spans are sent to Elastic, where they're transformed, stored, and ready to be visualized.
+
+**Learn more**
+
+If you're ready to give Elastic APM a try, see Get started with traces and APM.
+
+See the [Java agent reference](((apm-java-ref))/intro.html) for full documentation, including:
+
+* [Supported technologies](((apm-java-ref))/supported-technologies-details.html)
+* [Set up](((apm-java-ref))/setup.html)
+* [Configuration reference](((apm-java-ref))/configuration.html)
+* [API reference](((apm-java-ref))/apis.html)
+
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/apm/guide/about/net.mdx b/docs/en/serverless/transclusion/apm/guide/about/net.mdx
new file mode 100644
index 0000000000..4412ab2c2e
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/about/net.mdx
@@ -0,0 +1,28 @@
+import APMAgentWarning from '../../../../partials/apm-agent-warning.mdx'
+
+**Elastic APM .NET agent**
+
+The Elastic APM .NET agent auto-instruments supported technologies and records interesting events, like HTTP requests and database queries.
+To do this, it uses built-in capabilities of the instrumented frameworks like
+[Diagnostic Source](https://docs.microsoft.com/en-us/dotnet/api/system.diagnostics.diagnosticsource?view=netcore-3.0),
+an HTTP module for IIS, or
+[IDbCommandInterceptor](https://docs.microsoft.com/en-us/dotnet/api/system.data.entity.infrastructure.interception.idbcommandinterceptor?view=entity-framework-6.2.0) for Entity Framework.
+This means that for the supported technologies, there are no code changes required beyond enabling auto-instrumentation.
+
+The Agent automatically registers callback methods for built-in Diagnostic Source events.
+With this, the supported frameworks trigger Agent code for relevant events to measure their duration and collect metadata, like DB statements, as well as HTTP related information, like the URL, parameters, and headers.
+These events, called Transactions and Spans, are sent to Elastic, where they're transformed, stored, and ready to be visualized.
+
+**Learn more**
+
+If you're ready to give Elastic APM a try, see Get started with traces and APM.
+
+See the [.NET agent reference](((apm-dotnet-ref))/intro.html) for full documentation, including:
+
+* [Supported technologies](((apm-dotnet-ref))/supported-technologies.html)
+* [Set up](((apm-dotnet-ref))/setup.html)
+* [Configuration reference](((apm-dotnet-ref))/configuration.html)
+* [API reference](((apm-dotnet-ref))/public-api.html)
+
+
+
diff --git a/docs/en/serverless/transclusion/apm/guide/about/node.mdx b/docs/en/serverless/transclusion/apm/guide/about/node.mdx
new file mode 100644
index 0000000000..c79c741649
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/about/node.mdx
@@ -0,0 +1,26 @@
+import APMAgentWarning from '../../../../partials/apm-agent-warning.mdx'
+
+**Elastic APM Node.js agent**
+
+The Elastic APM Node.js agent auto-instruments supported frameworks and records interesting events,
+like HTTP requests and database queries. To do this, it patches modules as they are loaded to capture when module functions and callbacks are called. Additionally, there are some cases where a module will be patched to allow tracing context to be propagated through the asynchronous continuation.
+This means that for the supported technologies, there are no code changes required.
+
+The Agent automatically links module function calls to callback calls to measure their duration and metadata (like the DB statement),
+as well as HTTP-related information (like the URL, parameters, and headers).
+
+These events, called Transactions and Spans, are sent to Elastic, where they're transformed, stored, and ready to be visualized.
+
+**Learn more**
+
+If you're ready to give Elastic APM a try, see Get started with traces and APM.
+
+See the [Node.js agent reference](((apm-node-ref))/intro.html) for full documentation, including:
+
+* [Supported technologies](((apm-node-ref))/supported-technologies.html)
+* [Set up](((apm-node-ref))/set-up.html)
+* [Configuration reference](((apm-node-ref))/advanced-setup.html)
+* [API reference](((apm-node-ref))/api.html)
+
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/apm/guide/about/php.mdx b/docs/en/serverless/transclusion/apm/guide/about/php.mdx
new file mode 100644
index 0000000000..21e146b22a
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/about/php.mdx
@@ -0,0 +1,20 @@
+import APMAgentWarning from '../../../../partials/apm-agent-warning.mdx'
+
+**Elastic APM PHP agent**
+
+The Elastic APM PHP agent measures application performance and tracks errors.
+This extension must be installed in your PHP environment.
+
+**Learn more**
+
+If you're ready to give Elastic APM a try, see Get started with traces and APM.
+
+See the [PHP agent reference](((apm-php-ref))/intro.html) for full documentation, including:
+
+* [Supported technologies](((apm-php-ref))/supported-technologies.html)
+* [Set up](((apm-php-ref))/setup.html)
+* [Configuration reference](((apm-php-ref))/configuration.html)
+* [API reference](((apm-php-ref))/public-api.html)
+
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/apm/guide/about/python.mdx b/docs/en/serverless/transclusion/apm/guide/about/python.mdx
new file mode 100644
index 0000000000..065276a194
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/about/python.mdx
@@ -0,0 +1,32 @@
+import APMAgentWarning from '../../../../partials/apm-agent-warning.mdx'
+
+**Elastic APM Python agent**
+
+The Elastic APM Python agent has built-in support for Django and Flask performance metrics and error logging, as well as generic support for error logging in other WSGI frameworks.
+
+It instruments your application to collect APM events in a few different ways:
+
+To collect data about incoming requests and background tasks, the Agent integrates with supported technologies to make use of hooks and signals provided by the framework.
+These framework integrations require limited code changes in your application.
+
+To collect data from database drivers, HTTP libraries, and so on,
+Elastic APM agents instrument certain functions and methods in these libraries.
+Instrumentations are set up automatically and do not require any code changes.
+
+In addition to APM and error data,
+the Python agent also collects system and application metrics at regular intervals.
+This collection happens in a background thread that is started by the agent.
+
+**Learn more**
+
+If you're ready to give Elastic APM a try, see Get started with traces and APM.
+
+See the [Python agent reference](((apm-py-ref))/getting-started.html) for full documentation, including:
+
+* [Supported technologies](((apm-py-ref))/supported-technologies.html)
+* [Set up](((apm-py-ref))/set-up.html)
+* [Configuration reference](((apm-py-ref))/configuration.html)
+* [API reference](((apm-py-ref))/api.html)
+
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/apm/guide/about/ruby.mdx b/docs/en/serverless/transclusion/apm/guide/about/ruby.mdx
new file mode 100644
index 0000000000..13fccf8e34
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/about/ruby.mdx
@@ -0,0 +1,26 @@
+import APMAgentWarning from '../../../../partials/apm-agent-warning.mdx'
+
+**Elastic APM Ruby agent**
+
+The Elastic APM Ruby agent auto-instruments supported technologies and records interesting events,
+like HTTP requests and database queries. To do this, it uses relevant public APIs when they are provided by the libraries. Otherwise, it carefully wraps the necessary internal methods.
+This means that for the supported technologies, there are no code changes required.
+
+The APM agent automatically keeps track of queries to your data stores to measure their duration and metadata (like the DB statement),
+as well as HTTP-related information (like the URL, parameters, and headers).
+
+These events, called Transactions and Spans, are sent to Elastic, where they're transformed, stored, and ready to be visualized.
+
+**Learn more**
+
+If you're ready to give Elastic APM a try, see Get started with traces and APM.
+
+See the [Ruby agent reference](((apm-ruby-ref))/introduction.html) for full documentation, including:
+
+* [Supported technologies](((apm-ruby-ref))/supported-technologies.html)
+* [Set up](((apm-ruby-ref))/set-up.html)
+* [Configuration reference](((apm-ruby-ref))/configuration.html)
+* [API reference](((apm-ruby-ref))/api.html)
+
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/apm/guide/diagrams/apm-otel-architecture.mdx b/docs/en/serverless/transclusion/apm/guide/diagrams/apm-otel-architecture.mdx
new file mode 100644
index 0000000000..93ef2a916b
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/diagrams/apm-otel-architecture.mdx
@@ -0,0 +1,258 @@
+
+{/* ++++ */}
+{/*
*/}
+{/* */}
+{/* */}
+{/*
*/}
+{/* ++++ */}
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/apm/guide/install-agents/go.mdx b/docs/en/serverless/transclusion/apm/guide/install-agents/go.mdx
new file mode 100644
index 0000000000..14deee9d2e
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/install-agents/go.mdx
@@ -0,0 +1,41 @@
+{/* Comes from sandbox.elastic.dev/test-books/apm/guide/transclusion/tab-widgets/install-agents/go.mdx */}
+
+**1. Install the agent**
+
+Install the ((apm-agent)) package using `go get`:
+
+```sh
+go get -u go.elastic.co/apm/v2
+```
+
+**2. Configure the agent**
+
+To simplify development and testing,
+the agent defaults to sending data to Elastic at `http://localhost:8200`.
+To send data to an alternative location, you must configure `ELASTIC_APM_SERVER_URL`.
+
+```bash
+# The APM integration host and port
+export ELASTIC_APM_SERVER_URL=
+
+# If you do not specify `ELASTIC_APM_SERVICE_NAME`, the Go agent will use the
+# executable name. For example, if your executable is called "my-app.exe", then your
+# service will be identified as "my-app".
+export ELASTIC_APM_SERVICE_NAME=
+
+# Secret tokens are used to authorize requests to the APM integration
+export ELASTIC_APM_SECRET_TOKEN=
+```
+
+**3. Instrument your application**
+
+Instrumentation is the process of extending your application's code to report trace data to Elastic APM. Go applications must be instrumented manually at the source code level. To instrument your applications, use one of the following approaches:
+
+* [Built-in instrumentation modules](((apm-go-ref))/builtin-modules.html).
+* [Custom instrumentation](((apm-go-ref))/custom-instrumentation.html) and context propagation with the Go Agent API.
+
+**Learn more in the ((apm-agent)) reference**
+
+* [Supported technologies](((apm-go-ref))/supported-tech.html)
+* [Advanced configuration](((apm-go-ref))/configuration.html)
+* [Detailed guide to instrumenting Go source code](((apm-go-ref))/getting-started.html)
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/apm/guide/install-agents/java.mdx b/docs/en/serverless/transclusion/apm/guide/install-agents/java.mdx
new file mode 100644
index 0000000000..3bcdbcf70e
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/install-agents/java.mdx
@@ -0,0 +1,42 @@
+{/* Comes from sandbox.elastic.dev/test-books/apm/guide/transclusion/tab-widgets/install-agents/java.mdx */}
+
+Manually set up and configure the agent with the `-javaagent` JVM option. No application code change is required, but this requires an
+application restart. See below for more information on this setup method.
+
+**1. Download the ((apm-agent))**
+
+The first step in getting started with the Elastic APM Java agent is to retrieve a copy of the agent JAR.
+Java agent releases are published to [Maven Central](https://repo.maven.apache.org/maven2/). To get a copy, you can either:
+
+* download the [latest agent](https://oss.sonatype.org/service/local/artifact/maven/redirect?r=releases&g=co.elastic.apm&a=elastic-apm-agent&v=LATEST)
+or a [previous release](https://mvnrepository.com/artifact/co.elastic.apm/elastic-apm-agent) from Maven Central.
+* download with `curl`:
+
+ ```bash
+ curl -o 'elastic-apm-agent.jar' -L 'https://oss.sonatype.org/service/local/artifact/maven/redirect?r=releases&g=co.elastic.apm&a=elastic-apm-agent&v=LATEST'
+ ```
+
+**2. Add `-javaagent` flag**
+
+When starting your application, add the JVM flag: `-javaagent:/path/to/elastic-apm-agent-.jar`.
+
+**3. Configure**
+
+Different application servers have different ways of setting the `-javaagent` flag and system properties.
+Start your application (for example a Spring Boot application or other embedded servers) and add the `-javaagent` JVM flag.
+Use the `-D` prefix to configure the agent using system properties:
+
+```bash
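+# -javaagent attaches the agent to the JVM; the -Delastic.apm.* system properties configure it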
+java -javaagent:/path/to/elastic-apm-agent-.jar -Delastic.apm.service_name=my-cool-service -Delastic.apm.application_packages=org.example,org.another.example -Delastic.apm.server_url=http://127.0.0.1:8200 -jar my-application.jar
+```
+
+Refer to [Manual setup with `-javaagent` flag](((apm-java-ref))/setup-javaagent.html) to learn more.
+
+**Alternate setup methods**
+
+* **Automatic setup with `apm-agent-attach-cli.jar`**
+ Automatically set up the agent without needing to alter the configuration of your JVM or application server. This method requires no changes to application code
+ or JVM options, and allows attaching to a running JVM. Refer to the [Java agent documentation](((apm-java-ref))/setup-attach-cli.html) for more information on this setup method.
+* **Programmatic API setup to self-attach**
+ Set up the agent with a one-line code change and an extra `apm-agent-attach` dependency. This method requires no changes to JVM options, and
+ the agent artifact is embedded within the packaged application binary. Refer to the [Java agent documentation](((apm-java-ref))/setup-attach-api.html) for more information on this setup method.
diff --git a/docs/en/serverless/transclusion/apm/guide/install-agents/net.mdx b/docs/en/serverless/transclusion/apm/guide/install-agents/net.mdx
new file mode 100644
index 0000000000..6bc72a769c
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/install-agents/net.mdx
@@ -0,0 +1,20 @@
+{/* Comes from sandbox.elastic.dev/test-books/apm/guide/transclusion/tab-widgets/install-agents/net.mdx */}
+
+**Set up the ((apm-agent))**
+
+* **Profiler runtime instrumentation**:
+The agent supports auto instrumentation without any code change and without
+any recompilation of your projects. See [Profiler auto instrumentation](((apm-dotnet-ref))/setup-auto-instrumentation.html).
+* **NuGet packages**:
+The agent ships as a set of [NuGet packages](((apm-dotnet-ref))/packages.html) available on [nuget.org](https://nuget.org).
+You can add the Agent and specific instrumentations to a .NET application by
+referencing one or more of these packages and following the package documentation.
+* **Host startup hook**:
+On .NET Core 3.0+ or .NET 5+, the agent supports auto instrumentation without any code change and without
+any recompilation of your projects. See [Zero code change setup on .NET Core](((apm-dotnet-ref))/setup-dotnet-net-core.html)
+for more details.
+
+**Learn more in the ((apm-agent)) reference**
+
+* [Supported technologies](((apm-dotnet-ref))/supported-technologies.html)
+* [Advanced configuration](((apm-dotnet-ref))/configuration.html)
diff --git a/docs/en/serverless/transclusion/apm/guide/install-agents/node.mdx b/docs/en/serverless/transclusion/apm/guide/install-agents/node.mdx
new file mode 100644
index 0000000000..584f8c053f
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/install-agents/node.mdx
@@ -0,0 +1,43 @@
+{/* Comes from sandbox.elastic.dev/test-books/apm/guide/transclusion/tab-widgets/install-agents/node.mdx */}
+
+**1. Install the ((apm-agent))**
+
+Install the ((apm-agent)) for Node.js as a dependency to your application.
+
+```bash
+npm install elastic-apm-node --save
+```
+
+**2. Initialization**
+
+It's important that the agent is started before you require *any* other modules in your Node.js application, that is, before `http`, before your router, and so on.
+
+This means that you should probably require and start the agent in your application's main file (usually `index.js`, `server.js` or `app.js`).
+
+Here's a simple example of how Elastic APM is normally required and started:
+
+```js
+// Add this to the VERY top of the first file loaded in your app
+var apm = require('elastic-apm-node').start({
+ // Override service name from package.json
+ // Allowed characters: a-z, A-Z, 0-9, -, _, and space
+ serviceName: '',
+
+ // Use if APM integration requires a token
+ secretToken: '',
+
+ // Use if APM integration uses API keys for authentication
+ apiKey: '',
+
+ // Set custom APM integration host and port (default: http://127.0.0.1:8200)
+ serverUrl: '',
+})
+```
+
+The agent will now monitor the performance of your application and record any uncaught exceptions.
+
+**Learn more in the ((apm-agent)) reference**
+
+* [Supported technologies](((apm-node-ref))/supported-technologies.html)
+* [Babel/ES Modules](((apm-node-ref))/advanced-setup.html)
+* [Advanced configuration](((apm-node-ref))/configuring-the-agent.html)
diff --git a/docs/en/serverless/transclusion/apm/guide/install-agents/php.mdx b/docs/en/serverless/transclusion/apm/guide/install-agents/php.mdx
new file mode 100644
index 0000000000..66829ef901
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/install-agents/php.mdx
@@ -0,0 +1,69 @@
+{/* Comes from sandbox.elastic.dev/test-books/apm/guide/transclusion/tab-widgets/install-agents/php.mdx */}
+
+**1. Install the agent**
+
+Install the PHP agent using one of the [published packages](https://github.com/elastic/apm-agent-php/releases/latest).
+
+To use the RPM Package (RHEL/CentOS and Fedora):
+
+```bash
+rpm -ivh .rpm
+```
+
+To use the DEB package (Debian and Ubuntu):
+
+```bash
+dpkg -i .deb
+```
+
+To use the APK package (Alpine):
+
+```bash
+apk add --allow-untrusted .apk
+```
+
+If you can't find your distribution, you can install the agent by building it from source.
+The following instructions build the APM agent using the same Docker environment that Elastic uses to build its official packages.
+
+
+The agent is currently only available for Linux operating systems.
+
+
+1. Download the [agent source](https://github.com/elastic/apm-agent-php/).
+2. Execute the following commands to build the agent and install it:
+
+```bash
+cd apm-agent-php
+# for linux glibc - libc distributions (Ubuntu, Redhat, etc)
+export BUILD_ARCHITECTURE=linux-x86-64
+# for linux with musl - libc distributions (Alpine)
+export BUILD_ARCHITECTURE=linuxmusl-x86-64
+# provide a path to php-config tool
+export PHP_CONFIG=php-config
+
+# build extensions
+make -f .ci/Makefile build
+
+# run extension tests
+PHP_VERSION=`$PHP_CONFIG --version | cut -d'.' -f 1,2` make -f .ci/Makefile run-phpt-tests
+
+# install agent extensions
+sudo cp agent/native/_build/${BUILD_ARCHITECTURE}-release/ext/elastic_apm-*.so `$PHP_CONFIG --extension-dir`
+
+# install automatic loader
+sudo cp agent/native/_build/${BUILD_ARCHITECTURE}-release/loader/code/elastic_apm_loader.so `$PHP_CONFIG --extension-dir`
+```
+
+**2. Enable and configure the APM agent**
+
+Enable and configure your agent inside of the `php.ini` file:
+
+```ini
+extension=elastic_apm_loader.so
+elastic_apm.bootstrap_php_part_file=/agent/php/bootstrap_php_part.php
+```
+
+**Learn more in the ((apm-agent)) reference**
+
+* [Supported technologies](((apm-php-ref))/supported-technologies.html)
+* [Configuration](((apm-php-ref))/configuration.html)
diff --git a/docs/en/serverless/transclusion/apm/guide/install-agents/python.mdx b/docs/en/serverless/transclusion/apm/guide/install-agents/python.mdx
new file mode 100644
index 0000000000..5e48eaa4bc
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/install-agents/python.mdx
@@ -0,0 +1,96 @@
+{/* Comes from sandbox.elastic.dev/test-books/apm/guide/transclusion/tab-widgets/install-agents/python.mdx */}
+
+Django and Flask are two of several frameworks that the Elastic APM Python Agent
+supports. For a complete list of supported technologies, refer to the
+[Elastic APM Python Agent documentation](((apm-py-ref))/supported-technologies.html).
+
+
+
+
+ **1. Install the ((apm-agent))**
+
+ Install the ((apm-agent)) for Python as a dependency.
+
+ ```bash
+ $ pip install elastic-apm
+ ```
+
+ **2. Configure the ((apm-agent))**
+
+ Agents are libraries that run inside of your application process.
+ APM services are created programmatically based on the `SERVICE_NAME`.
+
+ ```python
+ # Add the agent to the installed apps
+ INSTALLED_APPS = (
+ 'elasticapm.contrib.django',
+ # ...
+ )
+
+ ELASTIC_APM = {
+ # Set required service name. Allowed characters:
+ # a-z, A-Z, 0-9, -, _, and space
+ 'SERVICE_NAME': '',
+
+ # Use if APM integration requires a token
+ 'SECRET_TOKEN': '',
+
+ # Set custom APM integration host and port (default: http://localhost:8200)
+ 'SERVER_URL': '',
+ }
+
+ # To send performance metrics, add our tracing middleware:
+ MIDDLEWARE = (
+ 'elasticapm.contrib.django.middleware.TracingMiddleware',
+ #...
+ )
+ ```
+
+
+
+ **1. Install the ((apm-agent))**
+
+ Install the ((apm-agent)) for Python as a dependency.
+
+ ```bash
+ $ pip install elastic-apm[flask]
+ ```
+
+ **2. Configure the ((apm-agent))**
+
+ Agents are libraries that run inside of your application process.
+ APM services are created programmatically based on the `SERVICE_NAME`.
+
+ ```python
+ # initialize using environment variables
+ from elasticapm.contrib.flask import ElasticAPM
+ app = Flask(__name__)
+ apm = ElasticAPM(app)
+
+ # or configure to use ELASTIC_APM in your application settings
+ from elasticapm.contrib.flask import ElasticAPM
+ app.config['ELASTIC_APM'] = {
+ # Set required service name. Allowed characters:
+ # a-z, A-Z, 0-9, -, _, and space
+ 'SERVICE_NAME': '',
+
+ # Use if APM integration requires a token
+ 'SECRET_TOKEN': '',
+
+ # Set custom APM integration host and port (default: http://localhost:8200)
+ 'SERVER_URL': '',
+ }
+
+ apm = ElasticAPM(app)
+ ```
+
+
+
+
+**Learn more in the ((apm-agent)) reference**
+
+* [Supported technologies](((apm-py-ref))/supported-technologies.html)
+* [Advanced configuration](((apm-py-ref))/configuration.html)
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/apm/guide/install-agents/ruby.mdx b/docs/en/serverless/transclusion/apm/guide/install-agents/ruby.mdx
new file mode 100644
index 0000000000..a2ec22661c
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/install-agents/ruby.mdx
@@ -0,0 +1,79 @@
+{/* Comes from sandbox.elastic.dev/test-books/apm/guide/transclusion/tab-widgets/install-agents/ruby.mdx */}
+
+**1. Install the ((apm-agent))**
+
+Add the agent to your Gemfile.
+
+```ruby
+gem 'elastic-apm'
+```
+
+**2. Configure the agent**
+
+
+
+ APM is automatically started when your app boots.
+ Configure the agent by creating the config file `config/elastic_apm.yml`:
+
+ ```yaml
+ # config/elastic_apm.yml:
+
+ # Set service name - allowed characters: a-z, A-Z, 0-9, -, _ and space
+ # Defaults to the name of your Rails app
+ service_name: 'my-service'
+
+ # Use if APM integration requires a token
+ secret_token: ''
+
+ # Set custom APM integration host and port (default: http://localhost:8200)
+ server_url: 'http://localhost:8200'
+ ```
+
+
+
+ For Rack or a compatible framework, like Sinatra, include the middleware in your app and start the agent.
+
+ ```ruby
+ # config.ru
+
+ app = lambda do |env|
+ [200, {'Content-Type' => 'text/plain'}, ['ok']]
+ end
+
+ # Wraps all requests in transactions and reports exceptions
+ use ElasticAPM::Middleware
+
+ # Start an instance of the Agent
+ ElasticAPM.start(service_name: 'NothingButRack')
+
+ run app
+
+ # Gracefully stop the agent when process exits.
+ # Makes sure any pending transactions are sent.
+ at_exit { ElasticAPM.stop }
+ ```
+
+ Create a config file `config/elastic_apm.yml`:
+
+ ```yaml
+ # config/elastic_apm.yml:
+
+ # Set service name - allowed characters: a-z, A-Z, 0-9, -, _ and space
+ # Defaults to the name of your Rack app's class.
+ service_name: 'my-service'
+
+ # Use if APM integration requires a token
+ secret_token: ''
+
+ # Set custom APM integration host and port (default: http://localhost:8200)
+ server_url: 'http://localhost:8200'
+ ```
+
+
+
+
+
+**Learn more in the ((apm-agent)) reference**
+
+* [Supported technologies](((apm-ruby-ref))/supported-technologies.html)
+* [Advanced configuration](((apm-ruby-ref))/configuration.html)
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/apm/guide/open-telemetry/otel-get-started.mdx b/docs/en/serverless/transclusion/apm/guide/open-telemetry/otel-get-started.mdx
new file mode 100644
index 0000000000..0f786ef726
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/open-telemetry/otel-get-started.mdx
@@ -0,0 +1,6 @@
+
+Elastic integrates with OpenTelemetry, allowing you to reuse your existing instrumentation
+to easily send observability data to Elastic.
+
+For more information on how to combine Elastic and OpenTelemetry,
+refer to .
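+
+As a minimal sketch (assuming the OpenTelemetry Python SDK and OTLP exporter packages are installed, and that the endpoint, authorization header, and service name are placeholders you replace with your own values), an application instrumented with the OpenTelemetry SDK can export traces over OTLP like this:
+
+```python
+from opentelemetry import trace
+from opentelemetry.sdk.resources import Resource
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import BatchSpanProcessor
+from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
+
+# The exporter reads the standard OTEL_EXPORTER_OTLP_ENDPOINT and
+# OTEL_EXPORTER_OTLP_HEADERS environment variables, so the existing
+# instrumentation does not need Elastic-specific code changes.
+provider = TracerProvider(resource=Resource.create({"service.name": "my-service"}))
+provider.add_span_processor(BatchSpanProcessor(OTLPSpanExporter()))
+trace.set_tracer_provider(provider)
+
+with trace.get_tracer(__name__).start_as_current_span("demo-span"):
+    pass  # your application logic
+```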
diff --git a/docs/en/serverless/transclusion/apm/guide/spec/v2/error.mdx b/docs/en/serverless/transclusion/apm/guide/spec/v2/error.mdx
new file mode 100644
index 0000000000..23b18ba8b1
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/spec/v2/error.mdx
@@ -0,0 +1,1296 @@
+export const snippet = `
+{
+ "$id": "docs/spec/v2/error",
+ "description": "errorEvent represents an error or a logged error message, captured by an APM agent in a monitored service.",
+ "type": "object",
+ "properties": {
+ "context": {
+ "description": "Context holds arbitrary contextual information for the event.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "cloud": {
+ "description": "Cloud holds fields related to the cloud or infrastructure the events are coming from.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "origin": {
+ "description": "Origin contains the self-nested field groups for cloud.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "account": {
+ "description": "The cloud account or organization id used to identify different entities in a multi-tenant environment.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "id": {
+ "description": "The cloud account or organization id used to identify different entities in a multi-tenant environment.",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ },
+ "provider": {
+ "description": "Name of the cloud provider.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "region": {
+ "description": "Region in which this host, resource, or service is located.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "service": {
+ "description": "The cloud service name is intended to distinguish services running on different platforms within a provider.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "The cloud service name is intended to distinguish services running on different platforms within a provider.",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "custom": {
+ "description": "Custom can contain additional metadata to be stored with the event. The format is unspecified and can be deeply nested objects. The information will not be indexed or searchable in Elasticsearch.",
+ "type": [
+ "null",
+ "object"
+ ]
+ },
+ "message": {
+ "description": "Message holds details related to message receiving and publishing if the captured event integrates with a messaging system",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "age": {
+ "description": "Age of the message. If the monitored messaging framework provides a timestamp for the message, agents may use it. Otherwise, the sending agent can add a timestamp in milliseconds since the Unix epoch to the message's metadata to be retrieved by the receiving agent. If a timestamp is not available, agents should omit this field.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "ms": {
+ "description": "Age of the message in milliseconds.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ }
+ }
+ },
+ "body": {
+ "description": "Body of the received message, similar to an HTTP request body",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "headers": {
+ "description": "Headers received with the message, similar to HTTP request headers.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "[.*]*$": {
+ "type": [
+ "null",
+ "array",
+ "string"
+ ],
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "queue": {
+ "description": "Queue holds information about the message queue where the message is received.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name holds the name of the message queue where the message is received.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "routing_key": {
+ "description": "RoutingKey holds the optional routing key of the received message as set on the queuing system, such as in RabbitMQ.",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ },
+ "page": {
+ "description": "Page holds information related to the current page and page referers. It is only sent from RUM agents.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "referer": {
+ "description": "Referer holds the URL of the page that 'linked' to the current page.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "url": {
+ "description": "URL of the current page",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ },
+ "request": {
+ "description": "Request describes the HTTP request information in case the event was created as a result of an HTTP request.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "body": {
+ "description": "Body only contais the request bod, not the query string information. It can either be a dictionary (for standard HTTP requests) or a raw request body.",
+ "type": [
+ "null",
+ "string",
+ "object"
+ ]
+ },
+ "cookies": {
+ "description": "Cookies used by the request, parsed as key-value objects.",
+ "type": [
+ "null",
+ "object"
+ ]
+ },
+ "env": {
+ "description": "Env holds environment variable information passed to the monitored service.",
+ "type": [
+ "null",
+ "object"
+ ]
+ },
+ "headers": {
+ "description": "Headers includes any HTTP headers sent by the requester. Cookies will be taken by headers if supplied.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "[.*]*$": {
+ "type": [
+ "null",
+ "array",
+ "string"
+ ],
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "http_version": {
+ "description": "HTTPVersion holds information about the used HTTP version.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "method": {
+ "description": "Method holds information about the method of the HTTP request.",
+ "type": "string",
+ "maxLength": 1024
+ },
+ "socket": {
+ "description": "Socket holds information related to the recorded request, such as whether or not data were encrypted and the remote address.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "encrypted": {
+ "description": "Encrypted indicates whether a request was sent as TLS/HTTPS request. DEPRECATED: this field will be removed in a future release.",
+ "type": [
+ "null",
+ "boolean"
+ ]
+ },
+ "remote_address": {
+ "description": "RemoteAddress holds the network address sending the request. It should be obtained through standard APIs and not be parsed from any headers like 'Forwarded'.",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ },
+ "url": {
+ "description": "URL holds information sucha as the raw URL, scheme, host and path.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "full": {
+ "description": "Full, possibly agent-assembled URL of the request, e.g. https://example.com:443/search?q=elasticsearch#top.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "hash": {
+ "description": "Hash of the request URL, e.g. 'top'",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "hostname": {
+ "description": "Hostname information of the request, e.g. 'example.com'.\"",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "pathname": {
+ "description": "Path of the request, e.g. '/search'",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "port": {
+ "description": "Port of the request, e.g. '443'. Can be sent as string or int.",
+ "type": [
+ "null",
+ "string",
+ "integer"
+ ],
+ "maxLength": 1024
+ },
+ "protocol": {
+ "description": "Protocol information for the recorded request, e.g. 'https:'.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "raw": {
+ "description": "Raw unparsed URL of the HTTP request line, e.g https://example.com:443/search?q=elasticsearch. This URL may be absolute or relative. For more details, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "search": {
+ "description": "Search contains the query string information of the request. It is expected to have values delimited by ampersands.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ }
+ },
+ "required": [
+ "method"
+ ]
+ },
+ "response": {
+ "description": "Response describes the HTTP response information in case the event was created as a result of an HTTP request.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "decoded_body_size": {
+ "description": "DecodedBodySize holds the size of the decoded payload.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "encoded_body_size": {
+ "description": "EncodedBodySize holds the size of the encoded payload.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "finished": {
+ "description": "Finished indicates whether the response was finished or not.",
+ "type": [
+ "null",
+ "boolean"
+ ]
+ },
+ "headers": {
+ "description": "Headers holds the http headers sent in the http response.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "[.*]*$": {
+ "type": [
+ "null",
+ "array",
+ "string"
+ ],
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "headers_sent": {
+ "description": "HeadersSent indicates whether http headers were sent.",
+ "type": [
+ "null",
+ "boolean"
+ ]
+ },
+ "status_code": {
+ "description": "StatusCode sent in the http response.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "transfer_size": {
+ "description": "TransferSize holds the total size of the payload.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ }
+ }
+ },
+ "service": {
+ "description": "Service related information can be sent per event. Information provided here will override the more generic information retrieved from metadata, missing service fields will be retrieved from the metadata information.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "agent": {
+ "description": "Agent holds information about the APM agent capturing the event.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "ephemeral_id": {
+ "description": "EphemeralID is a free format ID used for metrics correlation by agents",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "name": {
+ "description": "Name of the APM agent capturing information.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "version": {
+ "description": "Version of the APM agent capturing information.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "environment": {
+ "description": "Environment in which the monitored service is running, e.g. \`production\` or \`staging\`.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "framework": {
+ "description": "Framework holds information about the framework used in the monitored service.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name of the used framework",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "version": {
+ "description": "Version of the used framework",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "id": {
+ "description": "ID holds a unique identifier for the service.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "language": {
+ "description": "Language holds information about the programming language of the monitored service.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name of the used programming language",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "version": {
+ "description": "Version of the used programming language",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "name": {
+ "description": "Name of the monitored service.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024,
+ "pattern": "^[a-zA-Z0-9 _-]+$"
+ },
+ "node": {
+ "description": "Node must be a unique meaningful name of the service node.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "configured_name": {
+ "description": "Name of the service node",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "origin": {
+ "description": "Origin contains the self-nested field groups for service.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "id": {
+ "description": "Immutable id of the service emitting this event.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "name": {
+ "description": "Immutable name of the service emitting this event.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "version": {
+ "description": "The version of the service the data was collected from.",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ },
+ "runtime": {
+ "description": "Runtime holds information about the language runtime running the monitored service",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name of the language runtime",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "version": {
+ "description": "Version of the language runtime",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "target": {
+ "description": "Target holds information about the outgoing service in case of an outgoing event",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Immutable name of the target service for the event",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "type": {
+ "description": "Immutable type of the target service for the event",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ },
+ "anyOf": [
+ {
+ "properties": {
+ "type": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ {
+ "properties": {
+ "name": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "name"
+ ]
+ }
+ ]
+ },
+ "version": {
+ "description": "Version of the monitored service.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "tags": {
+ "description": "Tags are a flat mapping of user-defined tags. On the agent side, tags are called labels. Allowed value types are string, boolean and number values. Tags are indexed and searchable.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "additionalProperties": {
+ "type": [
+ "null",
+ "string",
+ "boolean",
+ "number"
+ ],
+ "maxLength": 1024
+ }
+ },
+ "user": {
+ "description": "User holds information about the correlated user for this event. If user data are provided here, all user related information from metadata is ignored, otherwise the metadata's user information will be stored with the event.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "domain": {
+ "description": "Domain of the logged in user",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "email": {
+ "description": "Email of the user.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "id": {
+ "description": "ID identifies the logged in user, e.g. can be the primary key of the user",
+ "type": [
+ "null",
+ "string",
+ "integer"
+ ],
+ "maxLength": 1024
+ },
+ "username": {
+ "description": "Name of the user.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ }
+ }
+ },
+ "culprit": {
+ "description": "Culprit identifies the function call which was the primary perpetrator of this event.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "exception": {
+ "description": "Exception holds information about the original error. The information is language specific.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "attributes": {
+ "description": "Attributes of the exception.",
+ "type": [
+ "null",
+ "object"
+ ]
+ },
+ "cause": {
+ "description": "Cause can hold a collection of error exceptions representing chained exceptions. The chain starts with the outermost exception, followed by its cause, and so on.",
+ "type": [
+ "null",
+ "array"
+ ],
+ "items": {
+ "type": "object"
+ },
+ "minItems": 0
+ },
+ "code": {
+ "description": "Code that is set when the error happened, e.g. database error code.",
+ "type": [
+ "null",
+ "string",
+ "integer"
+ ],
+ "maxLength": 1024
+ },
+ "handled": {
+ "description": "Handled indicates whether the error was caught in the code or not.",
+ "type": [
+ "null",
+ "boolean"
+ ]
+ },
+ "message": {
+ "description": "Message contains the originally captured error message.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "module": {
+ "description": "Module describes the exception type's module namespace.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "stacktrace": {
+ "description": "Stacktrace information of the captured exception.",
+ "type": [
+ "null",
+ "array"
+ ],
+ "items": {
+ "type": "object",
+ "properties": {
+ "abs_path": {
+ "description": "AbsPath is the absolute path of the frame's file.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "classname": {
+ "description": "Classname of the frame.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "colno": {
+ "description": "ColumnNumber of the frame.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "context_line": {
+ "description": "ContextLine is the line from the frame's file.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "filename": {
+ "description": "Filename is the relative name of the frame's file.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "function": {
+ "description": "Function represented by the frame.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "library_frame": {
+ "description": "LibraryFrame indicates whether the frame is from a third party library.",
+ "type": [
+ "null",
+ "boolean"
+ ]
+ },
+ "lineno": {
+ "description": "LineNumber of the frame.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "module": {
+ "description": "Module to which the frame belongs to.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "post_context": {
+ "description": "PostContext is a slice of code lines immediately before the line from the frame's file.",
+ "type": [
+ "null",
+ "array"
+ ],
+ "items": {
+ "type": "string"
+ },
+ "minItems": 0
+ },
+ "pre_context": {
+ "description": "PreContext is a slice of code lines immediately after the line from the frame's file.",
+ "type": [
+ "null",
+ "array"
+ ],
+ "items": {
+ "type": "string"
+ },
+ "minItems": 0
+ },
+ "vars": {
+ "description": "Vars is a flat mapping of local variables of the frame.",
+ "type": [
+ "null",
+ "object"
+ ]
+ }
+ },
+ "anyOf": [
+ {
+ "properties": {
+ "classname": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "classname"
+ ]
+ },
+ {
+ "properties": {
+ "filename": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "filename"
+ ]
+ }
+ ]
+ },
+ "minItems": 0
+ },
+ "type": {
+ "description": "Type of the exception.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ },
+ "anyOf": [
+ {
+ "properties": {
+ "message": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "message"
+ ]
+ },
+ {
+ "properties": {
+ "type": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "type"
+ ]
+ }
+ ]
+ },
+ "id": {
+ "description": "ID holds the hex encoded 128 random bits ID of the event.",
+ "type": "string",
+ "maxLength": 1024
+ },
+ "log": {
+ "description": "Log holds additional information added when the error is logged.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "level": {
+ "description": "Level represents the severity of the recorded log.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "logger_name": {
+ "description": "LoggerName holds the name of the used logger instance.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "message": {
+ "description": "Message of the logged error. In case a parameterized message is captured, Message should contain the same information, but with any placeholders being replaced.",
+ "type": "string"
+ },
+ "param_message": {
+ "description": "ParamMessage should contain the same information as Message, but with placeholders where parameters were logged, e.g. 'error connecting to %s'. The string is not interpreted, allowing differnt placeholders per client languange. The information might be used to group errors together.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "stacktrace": {
+ "description": "Stacktrace information of the captured error.",
+ "type": [
+ "null",
+ "array"
+ ],
+ "items": {
+ "type": "object",
+ "properties": {
+ "abs_path": {
+ "description": "AbsPath is the absolute path of the frame's file.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "classname": {
+ "description": "Classname of the frame.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "colno": {
+ "description": "ColumnNumber of the frame.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "context_line": {
+ "description": "ContextLine is the line from the frame's file.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "filename": {
+ "description": "Filename is the relative name of the frame's file.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "function": {
+ "description": "Function represented by the frame.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "library_frame": {
+ "description": "LibraryFrame indicates whether the frame is from a third party library.",
+ "type": [
+ "null",
+ "boolean"
+ ]
+ },
+ "lineno": {
+ "description": "LineNumber of the frame.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "module": {
+ "description": "Module to which the frame belongs to.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "post_context": {
+ "description": "PostContext is a slice of code lines immediately before the line from the frame's file.",
+ "type": [
+ "null",
+ "array"
+ ],
+ "items": {
+ "type": "string"
+ },
+ "minItems": 0
+ },
+ "pre_context": {
+ "description": "PreContext is a slice of code lines immediately after the line from the frame's file.",
+ "type": [
+ "null",
+ "array"
+ ],
+ "items": {
+ "type": "string"
+ },
+ "minItems": 0
+ },
+ "vars": {
+ "description": "Vars is a flat mapping of local variables of the frame.",
+ "type": [
+ "null",
+ "object"
+ ]
+ }
+ },
+ "anyOf": [
+ {
+ "properties": {
+ "classname": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "classname"
+ ]
+ },
+ {
+ "properties": {
+ "filename": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "filename"
+ ]
+ }
+ ]
+ },
+ "minItems": 0
+ }
+ },
+ "required": [
+ "message"
+ ]
+ },
+ "parent_id": {
+ "description": "ParentID holds the hex encoded 64 random bits ID of the parent transaction or span.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "timestamp": {
+ "description": "Timestamp holds the recorded time of the event, UTC based and formatted as microseconds since Unix epoch.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "trace_id": {
+ "description": "TraceID holds the hex encoded 128 random bits ID of the correlated trace.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "transaction": {
+ "description": "Transaction holds information about the correlated transaction.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name is the generic designation of a transaction in the scope of a single service, eg: 'GET /users/:id'.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "sampled": {
+ "description": "Sampled indicates whether or not the full information for a transaction is captured. If a transaction is unsampled no spans and less context information will be reported.",
+ "type": [
+ "null",
+ "boolean"
+ ]
+ },
+ "type": {
+ "description": "Type expresses the correlated transaction's type as keyword that has specific relevance within the service's domain, eg: 'request', 'backgroundjob'.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "transaction_id": {
+ "description": "TransactionID holds the hex encoded 64 random bits ID of the correlated transaction.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ },
+ "required": [
+ "id"
+ ],
+ "allOf": [
+ {
+ "if": {
+ "properties": {
+ "transaction_id": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "transaction_id"
+ ]
+ },
+ "then": {
+ "properties": {
+ "parent_id": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "parent_id"
+ ]
+ }
+ },
+ {
+ "if": {
+ "properties": {
+ "trace_id": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "trace_id"
+ ]
+ },
+ "then": {
+ "properties": {
+ "parent_id": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "parent_id"
+ ]
+ }
+ },
+ {
+ "if": {
+ "properties": {
+ "transaction_id": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "transaction_id"
+ ]
+ },
+ "then": {
+ "properties": {
+ "trace_id": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "trace_id"
+ ]
+ }
+ },
+ {
+ "if": {
+ "properties": {
+ "parent_id": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "parent_id"
+ ]
+ },
+ "then": {
+ "properties": {
+ "trace_id": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "trace_id"
+ ]
+ }
+ }
+ ],
+ "anyOf": [
+ {
+ "properties": {
+ "exception": {
+ "type": "object"
+ }
+ },
+ "required": [
+ "exception"
+ ]
+ },
+ {
+ "properties": {
+ "log": {
+ "type": "object"
+ }
+ },
+ "required": [
+ "log"
+ ]
+ }
+ ]
+}`
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/apm/guide/spec/v2/metadata.mdx b/docs/en/serverless/transclusion/apm/guide/spec/v2/metadata.mdx
new file mode 100644
index 0000000000..a4cfb12600
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/spec/v2/metadata.mdx
@@ -0,0 +1,570 @@
+export const snippet = `
+{
+ "$id": "docs/spec/v2/metadata",
+ "type": "object",
+ "properties": {
+ "cloud": {
+ "description": "Cloud metadata about where the monitored service is running.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "account": {
+ "description": "Account where the monitored service is running.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "id": {
+ "description": "ID of the cloud account.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "name": {
+ "description": "Name of the cloud account.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "availability_zone": {
+ "description": "AvailabilityZone where the monitored service is running, e.g. us-east-1a",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "instance": {
+ "description": "Instance on which the monitored service is running.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "id": {
+ "description": "ID of the cloud instance.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "name": {
+ "description": "Name of the cloud instance.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "machine": {
+ "description": "Machine on which the monitored service is running.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "type": {
+ "description": "ID of the cloud machine.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "project": {
+ "description": "Project in which the monitored service is running.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "id": {
+ "description": "ID of the cloud project.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "name": {
+ "description": "Name of the cloud project.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "provider": {
+ "description": "Provider that is used, e.g. aws, azure, gcp, digitalocean.",
+ "type": "string",
+ "maxLength": 1024
+ },
+ "region": {
+ "description": "Region where the monitored service is running, e.g. us-east-1",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "service": {
+ "description": "Service that is monitored on cloud",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name of the cloud service, intended to distinguish services running on different platforms within a provider, eg AWS EC2 vs Lambda, GCP GCE vs App Engine, Azure VM vs App Server.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ }
+ },
+ "required": [
+ "provider"
+ ]
+ },
+ "labels": {
+ "description": "Labels are a flat mapping of user-defined tags. Allowed value types are string, boolean and number values. Labels are indexed and searchable.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "additionalProperties": {
+ "type": [
+ "null",
+ "string",
+ "boolean",
+ "number"
+ ],
+ "maxLength": 1024
+ }
+ },
+ "network": {
+ "description": "Network holds information about the network over which the monitored service is communicating.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "connection": {
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "type": {
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ }
+ }
+ },
+ "process": {
+ "description": "Process metadata about the monitored service.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "argv": {
+ "description": "Argv holds the command line arguments used to start this process.",
+ "type": [
+ "null",
+ "array"
+ ],
+ "items": {
+ "type": "string"
+ },
+ "minItems": 0
+ },
+ "pid": {
+ "description": "PID holds the process ID of the service.",
+ "type": "integer"
+ },
+ "ppid": {
+ "description": "Ppid holds the parent process ID of the service.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "title": {
+ "description": "Title is the process title. It can be the same as process name.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ },
+ "required": [
+ "pid"
+ ]
+ },
+ "service": {
+ "description": "Service metadata about the monitored service.",
+ "type": "object",
+ "properties": {
+ "agent": {
+ "description": "Agent holds information about the APM agent capturing the event.",
+ "type": "object",
+ "properties": {
+ "activation_method": {
+ "description": "ActivationMethod of the APM agent capturing information.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "ephemeral_id": {
+ "description": "EphemeralID is a free format ID used for metrics correlation by agents",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "name": {
+ "description": "Name of the APM agent capturing information.",
+ "type": "string",
+ "maxLength": 1024,
+ "minLength": 1
+ },
+ "version": {
+ "description": "Version of the APM agent capturing information.",
+ "type": "string",
+ "maxLength": 1024
+ }
+ },
+ "required": [
+ "name",
+ "version"
+ ]
+ },
+ "environment": {
+ "description": "Environment in which the monitored service is running, e.g. \`production\` or \`staging\`.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "framework": {
+ "description": "Framework holds information about the framework used in the monitored service.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name of the used framework",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "version": {
+ "description": "Version of the used framework",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "id": {
+ "description": "ID holds a unique identifier for the running service.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "language": {
+ "description": "Language holds information about the programming language of the monitored service.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name of the used programming language",
+ "type": "string",
+ "maxLength": 1024
+ },
+ "version": {
+ "description": "Version of the used programming language",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ },
+ "required": [
+ "name"
+ ]
+ },
+ "name": {
+ "description": "Name of the monitored service.",
+ "type": "string",
+ "maxLength": 1024,
+ "minLength": 1,
+ "pattern": "^[a-zA-Z0-9 _-]+$"
+ },
+ "node": {
+ "description": "Node must be a unique meaningful name of the service node.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "configured_name": {
+ "description": "Name of the service node",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "runtime": {
+ "description": "Runtime holds information about the language runtime running the monitored service",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name of the language runtime",
+ "type": "string",
+ "maxLength": 1024
+ },
+ "version": {
+ "description": "Name of the language runtime",
+ "type": "string",
+ "maxLength": 1024
+ }
+ },
+ "required": [
+ "name",
+ "version"
+ ]
+ },
+ "version": {
+ "description": "Version of the monitored service.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ },
+ "required": [
+ "agent",
+ "name"
+ ]
+ },
+ "system": {
+ "description": "System metadata",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "architecture": {
+ "description": "Architecture of the system the monitored service is running on.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "configured_hostname": {
+ "description": "ConfiguredHostname is the configured name of the host the monitored service is running on. It should only be sent when configured by the user. If given, it is used as the event's hostname.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "container": {
+ "description": "Container holds the system's container ID if available.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "id": {
+ "description": "ID of the container the monitored service is running in.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "detected_hostname": {
+ "description": "DetectedHostname is the hostname detected by the APM agent. It usually contains what the hostname command returns on the host machine. It will be used as the event's hostname if ConfiguredHostname is not present.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "hostname": {
+ "description": "Deprecated: Use ConfiguredHostname and DetectedHostname instead. DeprecatedHostname is the host name of the system the service is running on. It does not distinguish between configured and detected hostname and therefore is deprecated and only used if no other hostname information is available.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "kubernetes": {
+ "description": "Kubernetes system information if the monitored service runs on Kubernetes.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "namespace": {
+ "description": "Namespace of the Kubernetes resource the monitored service is run on.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "node": {
+ "description": "Node related information",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name of the Kubernetes Node",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "pod": {
+ "description": "Pod related information",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name of the Kubernetes Pod",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "uid": {
+ "description": "UID is the system-generated string uniquely identifying the Pod.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ }
+ }
+ },
+ "platform": {
+ "description": "Platform name of the system platform the monitored service is running on.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "user": {
+ "description": "User metadata, which can be overwritten on a per event basis.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "domain": {
+ "description": "Domain of the logged in user",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "email": {
+ "description": "Email of the user.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "id": {
+ "description": "ID identifies the logged in user, e.g. can be the primary key of the user",
+ "type": [
+ "null",
+ "string",
+ "integer"
+ ],
+ "maxLength": 1024
+ },
+ "username": {
+ "description": "Name of the user.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ }
+ },
+ "required": [
+ "service"
+ ]
+}`
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/apm/guide/spec/v2/metricset.mdx b/docs/en/serverless/transclusion/apm/guide/spec/v2/metricset.mdx
new file mode 100644
index 0000000000..5e47004d9a
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/spec/v2/metricset.mdx
@@ -0,0 +1,304 @@
+export const snippet = `
+{
+ "$id": "docs/spec/v2/metricset",
+ "type": "object",
+ "properties": {
+ "faas": {
+ "description": "FAAS holds fields related to Function as a Service events.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "coldstart": {
+ "description": "Indicates whether a function invocation was a cold start or not.",
+ "type": [
+ "null",
+ "boolean"
+ ]
+ },
+ "execution": {
+ "description": "The request id of the function invocation.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "id": {
+ "description": "A unique identifier of the invoked serverless function.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "name": {
+ "description": "The lambda function name.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "trigger": {
+ "description": "Trigger attributes.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "request_id": {
+ "description": "The id of the origin trigger request.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "type": {
+ "description": "The trigger type.",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ },
+ "version": {
+ "description": "The lambda function version.",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ },
+ "samples": {
+ "description": "Samples hold application metrics collected from the agent.",
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "^[^*\"]*$": {
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "counts": {
+ "description": "Counts holds the bucket counts for histogram metrics. These numbers must be positive or zero. If Counts is specified, then Values is expected to be specified with the same number of elements, and with the same order.",
+ "type": [
+ "null",
+ "array"
+ ],
+ "items": {
+ "type": "integer",
+ "minimum": 0
+ },
+ "minItems": 0
+ },
+ "type": {
+ "description": "Type holds an optional metric type: gauge, counter, or histogram. If Type is unknown, it will be ignored.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "unit": {
+ "description": "Unit holds an optional unit for the metric. - \"percent\" (value is in the range [0,1]) - \"byte\" - a time unit: \"nanos\", \"micros\", \"ms\", \"s\", \"m\", \"h\", \"d\" If Unit is unknown, it will be ignored.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "value": {
+ "description": "Value holds the value of a single metric sample.",
+ "type": [
+ "null",
+ "number"
+ ]
+ },
+ "values": {
+ "description": "Values holds the bucket values for histogram metrics. Values must be provided in ascending order; failure to do so will result in the metric being discarded.",
+ "type": [
+ "null",
+ "array"
+ ],
+ "items": {
+ "type": "number"
+ },
+ "minItems": 0
+ }
+ },
+ "allOf": [
+ {
+ "if": {
+ "properties": {
+ "counts": {
+ "type": "array"
+ }
+ },
+ "required": [
+ "counts"
+ ]
+ },
+ "then": {
+ "properties": {
+ "values": {
+ "type": "array"
+ }
+ },
+ "required": [
+ "values"
+ ]
+ }
+ },
+ {
+ "if": {
+ "properties": {
+ "values": {
+ "type": "array"
+ }
+ },
+ "required": [
+ "values"
+ ]
+ },
+ "then": {
+ "properties": {
+ "counts": {
+ "type": "array"
+ }
+ },
+ "required": [
+ "counts"
+ ]
+ }
+ }
+ ],
+ "anyOf": [
+ {
+ "properties": {
+ "value": {
+ "type": "number"
+ }
+ },
+ "required": [
+ "value"
+ ]
+ },
+ {
+ "properties": {
+ "values": {
+ "type": "array"
+ }
+ },
+ "required": [
+ "values"
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "service": {
+ "description": "Service holds selected information about the correlated service.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name of the correlated service.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "version": {
+ "description": "Version of the correlated service.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "span": {
+ "description": "Span holds selected information about the correlated transaction.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "subtype": {
+ "description": "Subtype is a further sub-division of the type (e.g. postgresql, elasticsearch)",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "type": {
+ "description": "Type expresses the correlated span's type as keyword that has specific relevance within the service's domain, eg: 'request', 'backgroundjob'.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "tags": {
+ "description": "Tags are a flat mapping of user-defined tags. On the agent side, tags are called labels. Allowed value types are string, boolean and number values. Tags are indexed and searchable.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "additionalProperties": {
+ "type": [
+ "null",
+ "string",
+ "boolean",
+ "number"
+ ],
+ "maxLength": 1024
+ }
+ },
+ "timestamp": {
+ "description": "Timestamp holds the recorded time of the event, UTC based and formatted as microseconds since Unix epoch",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "transaction": {
+ "description": "Transaction holds selected information about the correlated transaction.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name of the correlated transaction.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "type": {
+ "description": "Type expresses the correlated transaction's type as keyword that has specific relevance within the service's domain, eg: 'request', 'backgroundjob'.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ }
+ },
+ "required": [
+ "samples"
+ ]
+}`
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/apm/guide/spec/v2/span.mdx b/docs/en/serverless/transclusion/apm/guide/spec/v2/span.mdx
new file mode 100644
index 0000000000..6ac47163fb
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/spec/v2/span.mdx
@@ -0,0 +1,906 @@
+export const snippet = `
+{
+ "$id": "docs/spec/v2/span",
+ "type": "object",
+ "properties": {
+ "action": {
+ "description": "Action holds the specific kind of event within the sub-type represented by the span (e.g. query, connect)",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "child_ids": {
+ "description": "ChildIDs holds a list of successor transactions and/or spans.",
+ "type": [
+ "null",
+ "array"
+ ],
+ "items": {
+ "type": "string",
+ "maxLength": 1024
+ },
+ "minItems": 0
+ },
+ "composite": {
+ "description": "Composite holds details on a group of spans represented by a single one.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "compression_strategy": {
+ "description": "A string value indicating which compression strategy was used. The valid values are \`exact_match\` and \`same_kind\`.",
+ "type": "string"
+ },
+ "count": {
+ "description": "Count is the number of compressed spans the composite span represents. The minimum count is 2, as a composite span represents at least two spans.",
+ "type": "integer",
+ "minimum": 2
+ },
+ "sum": {
+ "description": "Sum is the durations of all compressed spans this composite span represents in milliseconds.",
+ "type": "number",
+ "minimum": 0
+ }
+ },
+ "required": [
+ "compression_strategy",
+ "count",
+ "sum"
+ ]
+ },
+ "context": {
+ "description": "Context holds arbitrary contextual information for the event.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "db": {
+ "description": "Database contains contextual data for database spans",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "instance": {
+ "description": "Instance name of the database.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "link": {
+ "description": "Link to the database server.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "rows_affected": {
+ "description": "RowsAffected shows the number of rows affected by the statement.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "statement": {
+ "description": "Statement of the recorded database event, e.g. query.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "type": {
+ "description": "Type of the recorded database event., e.g. sql, cassandra, hbase, redis.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "user": {
+ "description": "User is the username with which the database is accessed.",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ },
+ "destination": {
+ "description": "Destination contains contextual data about the destination of spans",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "address": {
+ "description": "Address is the destination network address: hostname (e.g. 'localhost'), FQDN (e.g. 'elastic.co'), IPv4 (e.g. '127.0.0.1') IPv6 (e.g. '::1')",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "port": {
+ "description": "Port is the destination network port (e.g. 443)",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "service": {
+ "description": "Service describes the destination service",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name is the identifier for the destination service, e.g. 'http://elastic.co', 'elasticsearch', 'rabbitmq' ( DEPRECATED: this field will be removed in a future release",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "resource": {
+ "description": "Resource identifies the destination service resource being operated on e.g. 'http://elastic.co:80', 'elasticsearch', 'rabbitmq/queue_name' DEPRECATED: this field will be removed in a future release",
+ "type": "string",
+ "maxLength": 1024
+ },
+ "type": {
+ "description": "Type of the destination service, e.g. db, elasticsearch. Should typically be the same as span.type. DEPRECATED: this field will be removed in a future release",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ },
+ "required": [
+ "resource"
+ ]
+ }
+ }
+ },
+ "http": {
+ "description": "HTTP contains contextual information when the span concerns an HTTP request.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "method": {
+ "description": "Method holds information about the method of the HTTP request.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "request": {
+ "description": "Request describes the HTTP request information.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "id": {
+ "description": "ID holds the unique identifier for the http request.",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ },
+ "response": {
+ "description": "Response describes the HTTP response information in case the event was created as a result of an HTTP request.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "decoded_body_size": {
+ "description": "DecodedBodySize holds the size of the decoded payload.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "encoded_body_size": {
+ "description": "EncodedBodySize holds the size of the encoded payload.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "headers": {
+ "description": "Headers holds the http headers sent in the http response.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "[.*]*$": {
+ "type": [
+ "null",
+ "array",
+ "string"
+ ],
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "status_code": {
+ "description": "StatusCode sent in the http response.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "transfer_size": {
+ "description": "TransferSize holds the total size of the payload.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ }
+ }
+ },
+ "status_code": {
+ "description": "Deprecated: Use Response.StatusCode instead. StatusCode sent in the http response.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "url": {
+ "description": "URL is the raw url of the correlating HTTP request.",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ },
+ "message": {
+ "description": "Message holds details related to message receiving and publishing if the captured event integrates with a messaging system",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "age": {
+ "description": "Age of the message. If the monitored messaging framework provides a timestamp for the message, agents may use it. Otherwise, the sending agent can add a timestamp in milliseconds since the Unix epoch to the message's metadata to be retrieved by the receiving agent. If a timestamp is not available, agents should omit this field.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "ms": {
+ "description": "Age of the message in milliseconds.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ }
+ }
+ },
+ "body": {
+ "description": "Body of the received message, similar to an HTTP request body",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "headers": {
+ "description": "Headers received with the message, similar to HTTP request headers.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "[.*]*$": {
+ "type": [
+ "null",
+ "array",
+ "string"
+ ],
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "queue": {
+ "description": "Queue holds information about the message queue where the message is received.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name holds the name of the message queue where the message is received.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "routing_key": {
+ "description": "RoutingKey holds the optional routing key of the received message as set on the queuing system, such as in RabbitMQ.",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ },
+ "service": {
+ "description": "Service related information can be sent per span. Information provided here will override the more generic information retrieved from metadata, missing service fields will be retrieved from the metadata information.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "agent": {
+ "description": "Agent holds information about the APM agent capturing the event.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "ephemeral_id": {
+ "description": "EphemeralID is a free format ID used for metrics correlation by agents",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "name": {
+ "description": "Name of the APM agent capturing information.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "version": {
+ "description": "Version of the APM agent capturing information.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "environment": {
+ "description": "Environment in which the monitored service is running, e.g. \`production\` or \`staging\`.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "framework": {
+ "description": "Framework holds information about the framework used in the monitored service.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name of the used framework",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "version": {
+ "description": "Version of the used framework",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "id": {
+ "description": "ID holds a unique identifier for the service.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "language": {
+ "description": "Language holds information about the programming language of the monitored service.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name of the used programming language",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "version": {
+ "description": "Version of the used programming language",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "name": {
+ "description": "Name of the monitored service.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024,
+ "pattern": "^[a-zA-Z0-9 _-]+$"
+ },
+ "node": {
+ "description": "Node must be a unique meaningful name of the service node.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "configured_name": {
+ "description": "Name of the service node",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "origin": {
+ "description": "Origin contains the self-nested field groups for service.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "id": {
+ "description": "Immutable id of the service emitting this event.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "name": {
+ "description": "Immutable name of the service emitting this event.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "version": {
+ "description": "The version of the service the data was collected from.",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ },
+ "runtime": {
+ "description": "Runtime holds information about the language runtime running the monitored service",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name of the language runtime",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "version": {
+ "description": "Version of the language runtime",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "target": {
+ "description": "Target holds information about the outgoing service in case of an outgoing event",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Immutable name of the target service for the event",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "type": {
+ "description": "Immutable type of the target service for the event",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ },
+ "anyOf": [
+ {
+ "properties": {
+ "type": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ {
+ "properties": {
+ "name": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "name"
+ ]
+ }
+ ]
+ },
+ "version": {
+ "description": "Version of the monitored service.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "tags": {
+ "description": "Tags are a flat mapping of user-defined tags. On the agent side, tags are called labels. Allowed value types are string, boolean and number values. Tags are indexed and searchable.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "additionalProperties": {
+ "type": [
+ "null",
+ "string",
+ "boolean",
+ "number"
+ ],
+ "maxLength": 1024
+ }
+ }
+ }
+ },
+ "duration": {
+ "description": "Duration of the span in milliseconds. When the span is a composite one, duration is the gross duration, including \"whitespace\" in between spans.",
+ "type": "number",
+ "minimum": 0
+ },
+ "id": {
+ "description": "ID holds the hex encoded 64 random bits ID of the event.",
+ "type": "string",
+ "maxLength": 1024
+ },
+ "links": {
+ "description": "Links holds links to other spans, potentially in other traces.",
+ "type": [
+ "null",
+ "array"
+ ],
+ "items": {
+ "type": "object",
+ "properties": {
+ "span_id": {
+ "description": "SpanID holds the ID of the linked span.",
+ "type": "string",
+ "maxLength": 1024
+ },
+ "trace_id": {
+ "description": "TraceID holds the ID of the linked span's trace.",
+ "type": "string",
+ "maxLength": 1024
+ }
+ },
+ "required": [
+ "span_id",
+ "trace_id"
+ ]
+ },
+ "minItems": 0
+ },
+ "name": {
+ "description": "Name is the generic designation of a span in the scope of a transaction.",
+ "type": "string",
+ "maxLength": 1024
+ },
+ "otel": {
+ "description": "OTel contains unmapped OpenTelemetry attributes.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "attributes": {
+ "description": "Attributes hold the unmapped OpenTelemetry attributes.",
+ "type": [
+ "null",
+ "object"
+ ]
+ },
+ "span_kind": {
+ "description": "SpanKind holds the incoming OpenTelemetry span kind.",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ },
+ "outcome": {
+ "description": "Outcome of the span: success, failure, or unknown. Outcome may be one of a limited set of permitted values describing the success or failure of the span. It can be used for calculating error rates for outgoing requests.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "enum": [
+ "success",
+ "failure",
+ "unknown",
+ null
+ ]
+ },
+ "parent_id": {
+ "description": "ParentID holds the hex encoded 64 random bits ID of the parent transaction or span.",
+ "type": "string",
+ "maxLength": 1024
+ },
+ "sample_rate": {
+ "description": "SampleRate applied to the monitored service at the time where this span was recorded.",
+ "type": [
+ "null",
+ "number"
+ ]
+ },
+ "stacktrace": {
+ "description": "Stacktrace connected to this span event.",
+ "type": [
+ "null",
+ "array"
+ ],
+ "items": {
+ "type": "object",
+ "properties": {
+ "abs_path": {
+ "description": "AbsPath is the absolute path of the frame's file.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "classname": {
+ "description": "Classname of the frame.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "colno": {
+ "description": "ColumnNumber of the frame.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "context_line": {
+ "description": "ContextLine is the line from the frame's file.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "filename": {
+ "description": "Filename is the relative name of the frame's file.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "function": {
+ "description": "Function represented by the frame.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "library_frame": {
+ "description": "LibraryFrame indicates whether the frame is from a third party library.",
+ "type": [
+ "null",
+ "boolean"
+ ]
+ },
+ "lineno": {
+ "description": "LineNumber of the frame.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "module": {
+ "description": "Module to which the frame belongs to.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "post_context": {
+ "description": "PostContext is a slice of code lines immediately before the line from the frame's file.",
+ "type": [
+ "null",
+ "array"
+ ],
+ "items": {
+ "type": "string"
+ },
+ "minItems": 0
+ },
+ "pre_context": {
+ "description": "PreContext is a slice of code lines immediately after the line from the frame's file.",
+ "type": [
+ "null",
+ "array"
+ ],
+ "items": {
+ "type": "string"
+ },
+ "minItems": 0
+ },
+ "vars": {
+ "description": "Vars is a flat mapping of local variables of the frame.",
+ "type": [
+ "null",
+ "object"
+ ]
+ }
+ },
+ "anyOf": [
+ {
+ "properties": {
+ "classname": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "classname"
+ ]
+ },
+ {
+ "properties": {
+ "filename": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "filename"
+ ]
+ }
+ ]
+ },
+ "minItems": 0
+ },
+ "start": {
+ "description": "Start is the offset relative to the transaction's timestamp identifying the start of the span, in milliseconds.",
+ "type": [
+ "null",
+ "number"
+ ]
+ },
+ "subtype": {
+ "description": "Subtype is a further sub-division of the type (e.g. postgresql, elasticsearch)",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "sync": {
+ "description": "Sync indicates whether the span was executed synchronously or asynchronously.",
+ "type": [
+ "null",
+ "boolean"
+ ]
+ },
+ "timestamp": {
+ "description": "Timestamp holds the recorded time of the event, UTC based and formatted as microseconds since Unix epoch",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "trace_id": {
+ "description": "TraceID holds the hex encoded 128 random bits ID of the correlated trace.",
+ "type": "string",
+ "maxLength": 1024
+ },
+ "transaction_id": {
+ "description": "TransactionID holds the hex encoded 64 random bits ID of the correlated transaction.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "type": {
+ "description": "Type holds the span's type, and can have specific keywords within the service's domain (eg: 'request', 'backgroundjob', etc)",
+ "type": "string",
+ "maxLength": 1024
+ }
+ },
+ "required": [
+ "id",
+ "trace_id",
+ "name",
+ "parent_id",
+ "type",
+ "duration"
+ ],
+ "anyOf": [
+ {
+ "properties": {
+ "start": {
+ "type": "number"
+ }
+ },
+ "required": [
+ "start"
+ ]
+ },
+ {
+ "properties": {
+ "timestamp": {
+ "type": "integer"
+ }
+ },
+ "required": [
+ "timestamp"
+ ]
+ }
+ ]
+}`
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/apm/guide/spec/v2/transaction.mdx b/docs/en/serverless/transclusion/apm/guide/spec/v2/transaction.mdx
new file mode 100644
index 0000000000..610c92fa2a
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/spec/v2/transaction.mdx
@@ -0,0 +1,1134 @@
+export const snippet = `
+{
+ "$id": "docs/spec/v2/transaction",
+ "type": "object",
+ "properties": {
+ "context": {
+ "description": "Context holds arbitrary contextual information for the event.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "cloud": {
+ "description": "Cloud holds fields related to the cloud or infrastructure the events are coming from.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "origin": {
+ "description": "Origin contains the self-nested field groups for cloud.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "account": {
+ "description": "The cloud account or organization id used to identify different entities in a multi-tenant environment.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "id": {
+ "description": "The cloud account or organization id used to identify different entities in a multi-tenant environment.",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ },
+ "provider": {
+ "description": "Name of the cloud provider.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "region": {
+ "description": "Region in which this host, resource, or service is located.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "service": {
+ "description": "The cloud service name is intended to distinguish services running on different platforms within a provider.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "The cloud service name is intended to distinguish services running on different platforms within a provider.",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "custom": {
+ "description": "Custom can contain additional metadata to be stored with the event. The format is unspecified and can be deeply nested objects. The information will not be indexed or searchable in Elasticsearch.",
+ "type": [
+ "null",
+ "object"
+ ]
+ },
+ "message": {
+ "description": "Message holds details related to message receiving and publishing if the captured event integrates with a messaging system",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "age": {
+ "description": "Age of the message. If the monitored messaging framework provides a timestamp for the message, agents may use it. Otherwise, the sending agent can add a timestamp in milliseconds since the Unix epoch to the message's metadata to be retrieved by the receiving agent. If a timestamp is not available, agents should omit this field.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "ms": {
+ "description": "Age of the message in milliseconds.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ }
+ }
+ },
+ "body": {
+ "description": "Body of the received message, similar to an HTTP request body",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "headers": {
+ "description": "Headers received with the message, similar to HTTP request headers.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "[.*]*$": {
+ "type": [
+ "null",
+ "array",
+ "string"
+ ],
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "queue": {
+ "description": "Queue holds information about the message queue where the message is received.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name holds the name of the message queue where the message is received.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "routing_key": {
+ "description": "RoutingKey holds the optional routing key of the received message as set on the queuing system, such as in RabbitMQ.",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ },
+ "page": {
+ "description": "Page holds information related to the current page and page referers. It is only sent from RUM agents.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "referer": {
+ "description": "Referer holds the URL of the page that 'linked' to the current page.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "url": {
+ "description": "URL of the current page",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ },
+ "request": {
+ "description": "Request describes the HTTP request information in case the event was created as a result of an HTTP request.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "body": {
+ "description": "Body only contais the request bod, not the query string information. It can either be a dictionary (for standard HTTP requests) or a raw request body.",
+ "type": [
+ "null",
+ "string",
+ "object"
+ ]
+ },
+ "cookies": {
+ "description": "Cookies used by the request, parsed as key-value objects.",
+ "type": [
+ "null",
+ "object"
+ ]
+ },
+ "env": {
+ "description": "Env holds environment variable information passed to the monitored service.",
+ "type": [
+ "null",
+ "object"
+ ]
+ },
+ "headers": {
+ "description": "Headers includes any HTTP headers sent by the requester. Cookies will be taken by headers if supplied.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "[.*]*$": {
+ "type": [
+ "null",
+ "array",
+ "string"
+ ],
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "http_version": {
+ "description": "HTTPVersion holds information about the used HTTP version.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "method": {
+ "description": "Method holds information about the method of the HTTP request.",
+ "type": "string",
+ "maxLength": 1024
+ },
+ "socket": {
+ "description": "Socket holds information related to the recorded request, such as whether or not data were encrypted and the remote address.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "encrypted": {
+ "description": "Encrypted indicates whether a request was sent as TLS/HTTPS request. DEPRECATED: this field will be removed in a future release.",
+ "type": [
+ "null",
+ "boolean"
+ ]
+ },
+ "remote_address": {
+ "description": "RemoteAddress holds the network address sending the request. It should be obtained through standard APIs and not be parsed from any headers like 'Forwarded'.",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ },
+ "url": {
+ "description": "URL holds information sucha as the raw URL, scheme, host and path.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "full": {
+ "description": "Full, possibly agent-assembled URL of the request, e.g. https://example.com:443/search?q=elasticsearch#top.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "hash": {
+ "description": "Hash of the request URL, e.g. 'top'",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "hostname": {
+ "description": "Hostname information of the request, e.g. 'example.com'.\"",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "pathname": {
+ "description": "Path of the request, e.g. '/search'",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "port": {
+ "description": "Port of the request, e.g. '443'. Can be sent as string or int.",
+ "type": [
+ "null",
+ "string",
+ "integer"
+ ],
+ "maxLength": 1024
+ },
+ "protocol": {
+ "description": "Protocol information for the recorded request, e.g. 'https:'.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "raw": {
+ "description": "Raw unparsed URL of the HTTP request line, e.g https://example.com:443/search?q=elasticsearch. This URL may be absolute or relative. For more details, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "search": {
+ "description": "Search contains the query string information of the request. It is expected to have values delimited by ampersands.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ }
+ },
+ "required": [
+ "method"
+ ]
+ },
+ "response": {
+ "description": "Response describes the HTTP response information in case the event was created as a result of an HTTP request.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "decoded_body_size": {
+ "description": "DecodedBodySize holds the size of the decoded payload.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "encoded_body_size": {
+ "description": "EncodedBodySize holds the size of the encoded payload.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "finished": {
+ "description": "Finished indicates whether the response was finished or not.",
+ "type": [
+ "null",
+ "boolean"
+ ]
+ },
+ "headers": {
+ "description": "Headers holds the http headers sent in the http response.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "[.*]*$": {
+ "type": [
+ "null",
+ "array",
+ "string"
+ ],
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "headers_sent": {
+ "description": "HeadersSent indicates whether http headers were sent.",
+ "type": [
+ "null",
+ "boolean"
+ ]
+ },
+ "status_code": {
+ "description": "StatusCode sent in the http response.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "transfer_size": {
+ "description": "TransferSize holds the total size of the payload.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ }
+ }
+ },
+ "service": {
+ "description": "Service related information can be sent per event. Information provided here will override the more generic information retrieved from metadata, missing service fields will be retrieved from the metadata information.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "agent": {
+ "description": "Agent holds information about the APM agent capturing the event.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "ephemeral_id": {
+ "description": "EphemeralID is a free format ID used for metrics correlation by agents",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "name": {
+ "description": "Name of the APM agent capturing information.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "version": {
+ "description": "Version of the APM agent capturing information.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "environment": {
+ "description": "Environment in which the monitored service is running, e.g. \`production\` or \`staging\`.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "framework": {
+ "description": "Framework holds information about the framework used in the monitored service.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name of the used framework",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "version": {
+ "description": "Version of the used framework",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "id": {
+ "description": "ID holds a unique identifier for the service.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "language": {
+ "description": "Language holds information about the programming language of the monitored service.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name of the used programming language",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "version": {
+ "description": "Version of the used programming language",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "name": {
+ "description": "Name of the monitored service.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024,
+ "pattern": "^[a-zA-Z0-9 _-]+$"
+ },
+ "node": {
+ "description": "Node must be a unique meaningful name of the service node.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "configured_name": {
+ "description": "Name of the service node",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "origin": {
+ "description": "Origin contains the self-nested field groups for service.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "id": {
+ "description": "Immutable id of the service emitting this event.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "name": {
+ "description": "Immutable name of the service emitting this event.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "version": {
+ "description": "The version of the service the data was collected from.",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ },
+ "runtime": {
+ "description": "Runtime holds information about the language runtime running the monitored service",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name of the language runtime",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "version": {
+ "description": "Version of the language runtime",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "target": {
+ "description": "Target holds information about the outgoing service in case of an outgoing event",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "name": {
+ "description": "Immutable name of the target service for the event",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "type": {
+ "description": "Immutable type of the target service for the event",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ },
+ "anyOf": [
+ {
+ "properties": {
+ "type": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ {
+ "properties": {
+ "name": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "name"
+ ]
+ }
+ ]
+ },
+ "version": {
+ "description": "Version of the monitored service.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ },
+ "tags": {
+ "description": "Tags are a flat mapping of user-defined tags. On the agent side, tags are called labels. Allowed value types are string, boolean and number values. Tags are indexed and searchable.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "additionalProperties": {
+ "type": [
+ "null",
+ "string",
+ "boolean",
+ "number"
+ ],
+ "maxLength": 1024
+ }
+ },
+ "user": {
+ "description": "User holds information about the correlated user for this event. If user data are provided here, all user related information from metadata is ignored, otherwise the metadata's user information will be stored with the event.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "domain": {
+ "description": "Domain of the logged in user",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "email": {
+ "description": "Email of the user.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "id": {
+ "description": "ID identifies the logged in user, e.g. can be the primary key of the user",
+ "type": [
+ "null",
+ "string",
+ "integer"
+ ],
+ "maxLength": 1024
+ },
+ "username": {
+ "description": "Name of the user.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ }
+ }
+ }
+ }
+ },
+ "dropped_spans_stats": {
+ "description": "DroppedSpanStats holds information about spans that were dropped (for example due to transaction_max_spans or exit_span_min_duration).",
+ "type": [
+ "null",
+ "array"
+ ],
+ "items": {
+ "type": "object",
+ "properties": {
+ "destination_service_resource": {
+ "description": "DestinationServiceResource identifies the destination service resource being operated on. e.g. 'http://elastic.co:80', 'elasticsearch', 'rabbitmq/queue_name'.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "duration": {
+ "description": "Duration holds duration aggregations about the dropped span.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "count": {
+ "description": "Count holds the number of times the dropped span happened.",
+ "type": [
+ "null",
+ "integer"
+ ],
+ "minimum": 1
+ },
+ "sum": {
+ "description": "Sum holds dimensions about the dropped span's duration.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "us": {
+ "description": "Us represents the summation of the span duration.",
+ "type": [
+ "null",
+ "integer"
+ ],
+ "minimum": 0
+ }
+ }
+ }
+ }
+ },
+ "outcome": {
+ "description": "Outcome of the span: success, failure, or unknown. Outcome may be one of a limited set of permitted values describing the success or failure of the span. It can be used for calculating error rates for outgoing requests.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "enum": [
+ "success",
+ "failure",
+ "unknown",
+ null
+ ]
+ },
+ "service_target_name": {
+ "description": "ServiceTargetName identifies the instance name of the target service being operated on",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 512
+ },
+ "service_target_type": {
+ "description": "ServiceTargetType identifies the type of the target service being operated on e.g. 'oracle', 'rabbitmq'",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 512
+ }
+ }
+ },
+ "minItems": 0
+ },
+ "duration": {
+ "description": "Duration how long the transaction took to complete, in milliseconds with 3 decimal points.",
+ "type": "number",
+ "minimum": 0
+ },
+ "experience": {
+ "description": "UserExperience holds metrics for measuring real user experience. This information is only sent by RUM agents.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "cls": {
+ "description": "CumulativeLayoutShift holds the Cumulative Layout Shift (CLS) metric value, or a negative value if CLS is unknown. See https://web.dev/cls/",
+ "type": [
+ "null",
+ "number"
+ ],
+ "minimum": 0
+ },
+ "fid": {
+ "description": "FirstInputDelay holds the First Input Delay (FID) metric value, or a negative value if FID is unknown. See https://web.dev/fid/",
+ "type": [
+ "null",
+ "number"
+ ],
+ "minimum": 0
+ },
+ "longtask": {
+ "description": "Longtask holds longtask duration/count metrics.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "count": {
+ "description": "Count is the total number of of longtasks.",
+ "type": "integer",
+ "minimum": 0
+ },
+ "max": {
+ "description": "Max longtask duration",
+ "type": "number",
+ "minimum": 0
+ },
+ "sum": {
+ "description": "Sum of longtask durations",
+ "type": "number",
+ "minimum": 0
+ }
+ },
+ "required": [
+ "count",
+ "max",
+ "sum"
+ ]
+ },
+ "tbt": {
+ "description": "TotalBlockingTime holds the Total Blocking Time (TBT) metric value, or a negative value if TBT is unknown. See https://web.dev/tbt/",
+ "type": [
+ "null",
+ "number"
+ ],
+ "minimum": 0
+ }
+ }
+ },
+ "faas": {
+ "description": "FAAS holds fields related to Function as a Service events.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "coldstart": {
+ "description": "Indicates whether a function invocation was a cold start or not.",
+ "type": [
+ "null",
+ "boolean"
+ ]
+ },
+ "execution": {
+ "description": "The request id of the function invocation.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "id": {
+ "description": "A unique identifier of the invoked serverless function.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "name": {
+ "description": "The lambda function name.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "trigger": {
+ "description": "Trigger attributes.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "request_id": {
+ "description": "The id of the origin trigger request.",
+ "type": [
+ "null",
+ "string"
+ ]
+ },
+ "type": {
+ "description": "The trigger type.",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ },
+ "version": {
+ "description": "The lambda function version.",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ },
+ "id": {
+ "description": "ID holds the hex encoded 64 random bits ID of the event.",
+ "type": "string",
+ "maxLength": 1024
+ },
+ "links": {
+ "description": "Links holds links to other spans, potentially in other traces.",
+ "type": [
+ "null",
+ "array"
+ ],
+ "items": {
+ "type": "object",
+ "properties": {
+ "span_id": {
+ "description": "SpanID holds the ID of the linked span.",
+ "type": "string",
+ "maxLength": 1024
+ },
+ "trace_id": {
+ "description": "TraceID holds the ID of the linked span's trace.",
+ "type": "string",
+ "maxLength": 1024
+ }
+ },
+ "required": [
+ "span_id",
+ "trace_id"
+ ]
+ },
+ "minItems": 0
+ },
+ "marks": {
+ "description": "Marks capture the timing of a significant event during the lifetime of a transaction. Marks are organized into groups and can be set by the user or the agent. Marks are only reported by RUM agents.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "additionalProperties": {
+ "type": [
+ "null",
+ "object"
+ ],
+ "additionalProperties": {
+ "type": [
+ "null",
+ "number"
+ ]
+ }
+ }
+ },
+ "name": {
+ "description": "Name is the generic designation of a transaction in the scope of a single service, eg: 'GET /users/:id'.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "otel": {
+ "description": "OTel contains unmapped OpenTelemetry attributes.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "attributes": {
+ "description": "Attributes hold the unmapped OpenTelemetry attributes.",
+ "type": [
+ "null",
+ "object"
+ ]
+ },
+ "span_kind": {
+ "description": "SpanKind holds the incoming OpenTelemetry span kind.",
+ "type": [
+ "null",
+ "string"
+ ]
+ }
+ }
+ },
+ "outcome": {
+ "description": "Outcome of the transaction with a limited set of permitted values, describing the success or failure of the transaction from the service's perspective. It is used for calculating error rates for incoming requests. Permitted values: success, failure, unknown.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "enum": [
+ "success",
+ "failure",
+ "unknown",
+ null
+ ]
+ },
+ "parent_id": {
+ "description": "ParentID holds the hex encoded 64 random bits ID of the parent transaction or span.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "result": {
+ "description": "Result of the transaction. For HTTP-related transactions, this should be the status code formatted like 'HTTP 2xx'.",
+ "type": [
+ "null",
+ "string"
+ ],
+ "maxLength": 1024
+ },
+ "sample_rate": {
+ "description": "SampleRate applied to the monitored service at the time where this transaction was recorded. Allowed values are [0..1]. A SampleRate \u003c1 indicates that not all spans are recorded.",
+ "type": [
+ "null",
+ "number"
+ ]
+ },
+ "sampled": {
+ "description": "Sampled indicates whether or not the full information for a transaction is captured. If a transaction is unsampled no spans and less context information will be reported.",
+ "type": [
+ "null",
+ "boolean"
+ ]
+ },
+ "session": {
+ "description": "Session holds optional transaction session information for RUM.",
+ "type": [
+ "null",
+ "object"
+ ],
+ "properties": {
+ "id": {
+ "description": "ID holds a session ID for grouping a set of related transactions.",
+ "type": "string",
+ "maxLength": 1024
+ },
+ "sequence": {
+ "description": "Sequence holds an optional sequence number for a transaction within a session. It is not meaningful to compare sequences across two different sessions.",
+ "type": [
+ "null",
+ "integer"
+ ],
+ "minimum": 1
+ }
+ },
+ "required": [
+ "id"
+ ]
+ },
+ "span_count": {
+ "description": "SpanCount counts correlated spans.",
+ "type": "object",
+ "properties": {
+ "dropped": {
+ "description": "Dropped is the number of correlated spans that have been dropped by the APM agent recording the transaction.",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "started": {
+ "description": "Started is the number of correlated spans that are recorded.",
+ "type": "integer"
+ }
+ },
+ "required": [
+ "started"
+ ]
+ },
+ "timestamp": {
+ "description": "Timestamp holds the recorded time of the event, UTC based and formatted as microseconds since Unix epoch",
+ "type": [
+ "null",
+ "integer"
+ ]
+ },
+ "trace_id": {
+ "description": "TraceID holds the hex encoded 128 random bits ID of the correlated trace.",
+ "type": "string",
+ "maxLength": 1024
+ },
+ "type": {
+ "description": "Type expresses the transaction's type as keyword that has specific relevance within the service's domain, eg: 'request', 'backgroundjob'.",
+ "type": "string",
+ "maxLength": 1024
+ }
+ },
+ "required": [
+ "trace_id",
+ "id",
+ "type",
+ "span_count",
+ "duration"
+ ]
+}`
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive-widget.mdx b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive-widget.mdx
new file mode 100644
index 0000000000..5823498486
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive-widget.mdx
@@ -0,0 +1,32 @@
+
+import Go from './distributed-trace-receive/go.mdx'
+import Java from './distributed-trace-receive/java.mdx'
+import Net from './distributed-trace-receive/net.mdx'
+import Node from './distributed-trace-receive/node.mdx'
+import Php from './distributed-trace-receive/php.mdx'
+import Python from './distributed-trace-receive/python.mdx'
+import Ruby from './distributed-trace-receive/ruby.mdx'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive/go.mdx b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive/go.mdx
new file mode 100644
index 0000000000..054ac7e6c4
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive/go.mdx
@@ -0,0 +1,31 @@
+
+
+
+{/* Need help with this example */}
+
+1. Parse the incoming `TraceContext` with
+ [`ParseTraceparentHeader`](https://godoc.org/go.elastic.co/apm/module/apmhttp#ParseTraceparentHeader) or
+ [`ParseTracestateHeader`](https://godoc.org/go.elastic.co/apm/module/apmhttp#ParseTracestateHeader).
+
+1. Start a new transaction or span as a child of the incoming transaction with
+ [`StartTransactionOptions`](((apm-go-ref))/api.html#tracer-api-start-transaction-options) or
+ [`StartSpanOptions`](((apm-go-ref))/api.html#transaction-start-span-options).
+
+
+
+Example:
+
+```go
+// Receive incoming TraceContext
+traceContext, _ := apmhttp.ParseTraceparentHeader(r.Header.Get("Traceparent")) [^1]
+traceContext.State, _ = apmhttp.ParseTracestateHeader(r.Header["Tracestate"]...) [^2]
+
+opts := apm.TransactionOptions{
+ TraceContext: traceContext, [^3]
+}
+transaction := apm.DefaultTracer.StartTransactionOptions("GET /", "request", opts) [^4]
+```
+[^1]: Parse the `TraceParent` header
+[^2]: Parse the `Tracestate` header
+[^3]: Set the parent trace context
+[^4]: Start a new transaction as a child of the received `TraceContext`
diff --git a/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive/java.mdx b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive/java.mdx
new file mode 100644
index 0000000000..c5cdd84e12
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive/java.mdx
@@ -0,0 +1,36 @@
+
+
+
+1. Create a transaction as a child of the incoming transaction with
+ [`startTransactionWithRemoteParent()`](((apm-java-ref))/public-api.html#api-transaction-inject-trace-headers).
+
+1. Start and name the transaction with [`activate()`](((apm-java-ref))/public-api.html#api-transaction-activate)
+ and [`setName()`](((apm-java-ref))/public-api.html#api-set-name).
+
+
+
+Example:
+
+```java
+// Hook into a callback provided by the framework that is called on incoming requests
+public Response onIncomingRequest(Request request) throws Exception {
+ // creates a transaction representing the server-side handling of the request
+ Transaction transaction = ElasticApm.startTransactionWithRemoteParent(request::getHeader, request::getHeaders); [^1]
+ try (final Scope scope = transaction.activate()) { [^2]
+ String name = "a useful name like ClassName#methodName where the request is handled";
+ transaction.setName(name); [^3]
+ transaction.setType(Transaction.TYPE_REQUEST); [^4]
+ return request.handle();
+ } catch (Exception e) {
+ transaction.captureException(e);
+ throw e;
+ } finally {
+ transaction.end(); [^5]
+ }
+}
+```
+[^1]: Create a transaction as the child of a remote parent
+[^2]: Activate the transaction
+[^3]: Name the transaction
+[^4]: Add a transaction type
+[^5]: Eventually, end the transaction
diff --git a/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive/net.mdx b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive/net.mdx
new file mode 100644
index 0000000000..65dd04cdca
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive/net.mdx
@@ -0,0 +1,15 @@
+
+
+
+Deserialize the incoming distributed tracing context, and pass it to any of the
+[`StartTransaction`](((apm-dotnet-ref))/public-api.html#api-start-transaction) or
+[`CaptureTransaction`](((apm-dotnet-ref))/public-api.html#convenient-capture-transaction) APIs —
+all of which have an optional `DistributedTracingData` parameter.
+This will create a new transaction or span as a child of the incoming trace context.
+
+Example starting a new transaction:
+
+```csharp
+var transaction2 = Agent.Tracer.StartTransaction("Transaction2", "TestTransaction",
+ DistributedTracingData.TryDeserializeFromString(serializedDistributedTracingData));
+```
diff --git a/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive/node.mdx b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive/node.mdx
new file mode 100644
index 0000000000..21e816756d
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive/node.mdx
@@ -0,0 +1,19 @@
+
+
+
+1. Decode and store the `traceparent` in the receiving service.
+
+1. Pass in the `traceparent` as the `childOf` option to manually start a new transaction
+ as a child of the received `traceparent` with
+ [`apm.startTransaction()`](((apm-node-ref))/agent-api.html#apm-start-transaction).
+
+
+
+Example receiving a `traceparent` over raw UDP:
+
+```js
+const traceparent = readTraceparentFromUDPPacket() [^1]
+agent.startTransaction('my-service-b-transaction', { childOf: traceparent }) [^2]
+```
+[^1]: Read the `traceparent` from the incoming request.
+[^2]: Use the `traceparent` to initialize a new transaction that is a child of the original `traceparent`.
diff --git a/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive/php.mdx b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive/php.mdx
new file mode 100644
index 0000000000..1bd497c7b3
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive/php.mdx
@@ -0,0 +1,26 @@
+
+1. Receive the distributed tracing data on the server side.
+
+1. Begin a new transaction using the agent's public API. For example, use [`ElasticApm::beginCurrentTransaction`](((apm-php-ref))/public-api.html#api-elasticapm-class-begin-current-transaction)
+ and pass the received distributed tracing data (serialized as string) as a parameter.
+ This will create a new transaction as a child of the incoming trace context.
+
+1. Don't forget to eventually end the transaction on the server side.
+
+
+
+Example:
+
+```php
+$receiverTransaction = ElasticApm::beginCurrentTransaction( [^1]
+ 'GET /data-api',
+ 'data-layer',
+ /* timestamp */ null,
+ $distDataAsString [^2]
+);
+```
+[^1]: Start a new transaction
+[^2]: Pass in the received distributed tracing data (serialized as string)
+
+Once this new transaction has been created in the receiving service,
+you can create child spans, or use any other agent API methods as you typically would.
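+
+For example, a minimal sketch of that follow-up work, assuming the transaction begun above is the current transaction (the span name and type below are placeholders):
+
+```php
+// Create a child span under the current transaction (name and type are placeholders)
+$span = ElasticApm::getCurrentTransaction()->beginCurrentSpan('SELECT FROM orders', 'db');
+// ... do the work the span represents ...
+$span->end();
+
+// Eventually end the transaction on the server side
+$receiverTransaction->end();
+```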
diff --git a/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive/python.mdx b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive/python.mdx
new file mode 100644
index 0000000000..c3e7aade68
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive/python.mdx
@@ -0,0 +1,21 @@
+
+
+
+1. Create a `TraceParent` object from a string or HTTP header.
+
+1. Start a new transaction as a child of the `TraceParent` by passing in a `TraceParent` object.
+
+
+
+Example using HTTP headers:
+
+```python
+parent = elasticapm.trace_parent_from_headers(headers_dict) [^1]
+client.begin_transaction('processors', trace_parent=parent) [^2]
+```
+[^1]: Create a `TraceParent` object from HTTP headers formed as a dictionary
+[^2]: Begin a new transaction as a child of the received `TraceParent`
+
+
+See the [`TraceParent` API](((apm-py-ref))/api.html#traceparent-api) for additional examples.
+
diff --git a/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive/ruby.mdx b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive/ruby.mdx
new file mode 100644
index 0000000000..c5adb62716
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-receive/ruby.mdx
@@ -0,0 +1,22 @@
+
+
+
+Start a new transaction or span as a child of the incoming transaction or span with
+[`with_transaction`](((apm-ruby-ref))/api.html#api-agent-with_transaction) or
+[`with_span`](((apm-ruby-ref))/api.html#api-agent-with_span).
+
+Example:
+
+```ruby
+# env being a Rack env
+context = ElasticAPM::TraceContext.parse(env: env) [^1]
+
+ElasticAPM.with_transaction("Do things", trace_context: context) do [^2]
+ ElasticAPM.with_span("Do nested thing", trace_context: context) do [^3]
+ end
+end
+```
+[^1]: Parse the incoming `TraceContext`
+[^2]: Create a transaction as a child of the incoming `TraceContext`
+[^3]: Create a span as a child of the newly created transaction. `trace_context` is optional here,
+as spans are automatically created as children of their parent transaction's `TraceContext` when none is passed.
diff --git a/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send-widget.mdx b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send-widget.mdx
new file mode 100644
index 0000000000..6a926c07e4
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send-widget.mdx
@@ -0,0 +1,32 @@
+
+import Go from './distributed-trace-send/go.mdx'
+import Java from './distributed-trace-send/java.mdx'
+import Net from './distributed-trace-send/net.mdx'
+import Node from './distributed-trace-send/node.mdx'
+import Php from './distributed-trace-send/php.mdx'
+import Python from './distributed-trace-send/python.mdx'
+import Ruby from './distributed-trace-send/ruby.mdx'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send/go.mdx b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send/go.mdx
new file mode 100644
index 0000000000..ea05ba3a89
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send/go.mdx
@@ -0,0 +1,26 @@
+
+
+
+1. Start a transaction with
+ [`StartTransaction`](((apm-go-ref))/api.html#tracer-api-start-transaction) or a span with
+ [`StartSpan`](((apm-go-ref))/api.html#transaction-start-span).
+
+1. Get the active `TraceContext`.
+
+1. Send the `TraceContext` to the receiving service.
+
+
+
+Example:
+
+```go
+transaction := apm.DefaultTracer.StartTransaction("GET /", "request") [^1]
+traceContext := transaction.TraceContext() [^2]
+
+// Send TraceContext to receiving service
+traceparent := apmhttp.FormatTraceparentHeader(traceContext) [^3]
+tracestate := traceContext.State.String()
+```
+[^1]: Start a transaction
+[^2]: Get `TraceContext` from current Transaction
+[^3]: Format the `TraceContext` or `tracestate` as a `traceparent` header.
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send/java.mdx b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send/java.mdx
new file mode 100644
index 0000000000..77051e5a2f
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send/java.mdx
@@ -0,0 +1,33 @@
+
+
+
+1. Start a transaction with [`startTransaction`](((apm-java-ref))/public-api.html#api-start-transaction),
+ or a span with [`startSpan`](((apm-java-ref))/public-api.html#api-span-start-span).
+
+1. Inject the `traceparent` header into the request object with
+ [`injectTraceHeaders`](((apm-java-ref))/public-api.html#api-transaction-inject-trace-headers)
+
+
+
+Example of manually instrumenting an RPC framework:
+
+```java
+// Hook into a callback provided by the RPC framework that is called on outgoing requests
+public Response onOutgoingRequest(Request request) throws Exception {
+ Span span = ElasticApm.currentSpan() [^1]
+ .startSpan("external", "http", null)
+ .setName(request.getMethod() + " " + request.getHost());
+ try (final Scope scope = span.activate()) {
+ span.injectTraceHeaders((name, value) -> request.addHeader(name, value)); [^2]
+ return request.execute();
+ } catch (Exception e) {
+ span.captureException(e);
+ throw e;
+ } finally {
+ span.end(); [^3]
+ }
+}
+```
+[^1]: Create a span representing an external call
+[^2]: Inject the `traceparent` header into the request object
+[^3]: End the span
diff --git a/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send/net.mdx b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send/net.mdx
new file mode 100644
index 0000000000..4a6e877bee
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send/net.mdx
@@ -0,0 +1,19 @@
+
+
+
+1. Serialize the distributed tracing context of the active transaction or span with
+ [`CurrentTransaction`](((apm-dotnet-ref))/public-api.html#api-current-transaction) or
+ [`CurrentSpan`](((apm-dotnet-ref))/public-api.html#api-current-span).
+
+1. Send the serialized context to the receiving service.
+
+
+
+Example:
+
+```csharp
+string outgoingDistributedTracingData =
+ (Agent.Tracer.CurrentSpan?.OutgoingDistributedTracingData
+ ?? Agent.Tracer.CurrentTransaction?.OutgoingDistributedTracingData)?.SerializeToString();
+// Now send `outgoingDistributedTracingData` to the receiving service
+```
diff --git a/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send/node.mdx b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send/node.mdx
new file mode 100644
index 0000000000..ed74e73e51
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send/node.mdx
@@ -0,0 +1,23 @@
+
+
+
+1. Start a transaction with [`apm.startTransaction()`](((apm-node-ref))/agent-api.html#apm-start-transaction),
+ or a span with [`apm.startSpan()`](((apm-node-ref))/agent-api.html#apm-start-span).
+
+1. Get the serialized `traceparent` string of the started transaction/span with
+ [`currentTraceparent`](((apm-node-ref))/agent-api.html#apm-current-traceparent).
+
+1. Encode the `traceparent` and send it to the receiving service inside your regular request.
+
+
+
+Example using raw UDP to communicate between two services, A and B:
+
+```js
+agent.startTransaction('my-service-a-transaction'); [^1]
+const traceparent = agent.currentTraceparent; [^2]
+sendMetadata(`traceparent: ${traceparent}\n`); [^3]
+```
+[^1]: Start a transaction
+[^2]: Get the current `traceparent`
+[^3]: Send the `traceparent` as a header to service B.
diff --git a/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send/php.mdx b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send/php.mdx
new file mode 100644
index 0000000000..8863b55059
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send/php.mdx
@@ -0,0 +1,15 @@
+
+
+
+1. On the client side (i.e., the side sending the request), get the current distributed tracing context.
+
+1. Serialize the current distributed tracing context to a format supported by the request's transport and send it to the server side (i.e., the side receiving the request).
+
+
+
+Example:
+
+```php
+$distDataAsString = ElasticApm::getSerializedCurrentDistributedTracingData(); [^1]
+```
+[^1]: Get the current distributed tracing data serialized as string
diff --git a/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send/python.mdx b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send/python.mdx
new file mode 100644
index 0000000000..a4a853c2c6
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send/python.mdx
@@ -0,0 +1,22 @@
+
+
+
+1. Start a transaction with [`begin_transaction()`](((apm-py-ref))/api.html#client-api-begin-transaction).
+
+1. Get the `trace_parent` of the active transaction.
+
+1. Send the `trace_parent` to the receiving service.
+
+
+
+Example:
+
+```python
+client.begin_transaction('new-transaction') [^1]
+
+trace_parent_str = elasticapm.get_trace_parent_header() [^2]
+
+# Send `trace_parent_str` to another service
+```
+[^1]: Start a new transaction
+[^2]: Return the string representation of the current transaction's `TraceParent` object
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send/ruby.mdx b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send/ruby.mdx
new file mode 100644
index 0000000000..eca0903936
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/tab-widgets/distributed-trace-send/ruby.mdx
@@ -0,0 +1,21 @@
+
+
+
+1. Start a span with [`with_span`](((apm-ruby-ref))/api.html#api-agent-with_span).
+
+1. Get the active `TraceContext`.
+
+1. Send the `TraceContext` to the receiving service.
+
+
+
+Example:
+
+```ruby
+ElasticAPM.with_span "Name" do |span| [^1]
+ header = span.trace_context.traceparent.to_header [^2]
+ # send the TraceContext Header to a receiving service...
+end
+```
+[^1]: Start a span
+[^2]: Get the `TraceContext`
diff --git a/docs/en/serverless/transclusion/apm/guide/tab-widgets/no-data-indexed/fleet-managed.mdx b/docs/en/serverless/transclusion/apm/guide/tab-widgets/no-data-indexed/fleet-managed.mdx
new file mode 100644
index 0000000000..7c0e5bfc55
--- /dev/null
+++ b/docs/en/serverless/transclusion/apm/guide/tab-widgets/no-data-indexed/fleet-managed.mdx
@@ -0,0 +1,16 @@
+
+**Are the URL and API key correct?**
+
+Double check that the intake URL and API key are correct in your APM agent configuration.
+Reference the relevant [((apm-agent)) documentation](((apm-agents-ref))/index.html) for details on how to set these configuration variables.
+
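+Most APM agents can also read these settings from environment variables. As a minimal sketch (the host and key values are placeholders; check your agent's configuration reference for the exact variable names it supports):
+
+```sh
+# Illustrative values only: use the intake URL and API key from your project
+export ELASTIC_APM_SERVER_URL="https://<your-intake-host>:443"
+export ELASTIC_APM_API_KEY="<your-api-key>"
+```
+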
+To create a new API key, see .
+
+If you see requests coming through the managed intake service but they are not accepted (a response code other than `202`),
+see managed intake service response codes to narrow down the possible causes.
+
+**Are there instrumentation gaps?**
+
+APM agents provide auto-instrumentation for many popular frameworks and libraries.
+If the ((apm-agent)) is not auto-instrumenting something that you were expecting, data won't be sent to Elastic.
+Reference the relevant [((apm-agent)) documentation](((apm-agents-ref))/index.html) for details on what is automatically instrumented.
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/download-widget.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/download-widget.mdx
new file mode 100644
index 0000000000..666f90a27b
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/download-widget.mdx
@@ -0,0 +1,24 @@
+
+import Mac from './download/mac.mdx'
+import Linux from './download/linux.mdx'
+import Win from './download/win.mdx'
+import Deb from './download/deb.mdx'
+import Rpm from './download/rpm.mdx'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/download/deb.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/download/deb.mdx
new file mode 100644
index 0000000000..dc0e25553e
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/download/deb.mdx
@@ -0,0 +1,10 @@
+
+
+To simplify upgrading to future versions of ((agent)), we recommend
+that you use the tarball distribution instead of the DEB distribution.
+
+
+```sh
+curl -L -O https://artifacts.elastic.co/downloads/beats/elastic-agent/elastic-agent-((version))-amd64.deb
+sudo dpkg -i elastic-agent-((version))-amd64.deb
+```
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/download/linux.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/download/linux.mdx
new file mode 100644
index 0000000000..ea256b0146
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/download/linux.mdx
@@ -0,0 +1,5 @@
+
+```sh
+curl -L -O https://artifacts.elastic.co/downloads/beats/elastic-agent/elastic-agent-((version))-linux-x86_64.tar.gz
+tar xzvf elastic-agent-((version))-linux-x86_64.tar.gz
+```
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/download/mac.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/download/mac.mdx
new file mode 100644
index 0000000000..c62795b922
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/download/mac.mdx
@@ -0,0 +1,5 @@
+
+```sh
+curl -L -O https://artifacts.elastic.co/downloads/beats/elastic-agent/elastic-agent-((version))-darwin-x86_64.tar.gz
+tar xzvf elastic-agent-((version))-darwin-x86_64.tar.gz
+```
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/download/rpm.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/download/rpm.mdx
new file mode 100644
index 0000000000..586b8ad1ca
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/download/rpm.mdx
@@ -0,0 +1,10 @@
+
+
+To simplify upgrading to future versions of ((agent)), we recommend
+that you use the tarball distribution instead of the RPM distribution.
+
+
+```sh
+curl -L -O https://artifacts.elastic.co/downloads/beats/elastic-agent/elastic-agent-((version))-x86_64.rpm
+sudo rpm -vi elastic-agent-((version))-x86_64.rpm
+```
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/download/win.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/download/win.mdx
new file mode 100644
index 0000000000..7306d3be5c
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/download/win.mdx
@@ -0,0 +1,12 @@
+
+```powershell
+# PowerShell 5.0+
+wget https://artifacts.elastic.co/downloads/beats/elastic-agent/elastic-agent-((version))-windows-x86_64.zip -OutFile elastic-agent-((version))-windows-x86_64.zip
+Expand-Archive .\elastic-agent-((version))-windows-x86_64.zip
+```
+Or manually:
+
+1. Download the ((agent)) Windows zip file from the
+ [download page](https://www.elastic.co/downloads/beats/elastic-agent).
+
+1. Extract the contents of the zip file.
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/run-standalone-widget.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/run-standalone-widget.mdx
new file mode 100644
index 0000000000..f61e8a780c
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/run-standalone-widget.mdx
@@ -0,0 +1,24 @@
+
+import Deb from './run-standalone/content/deb.mdx'
+import Linux from './run-standalone/content/linux.mdx'
+import Mac from './run-standalone/content/mac.mdx'
+import Rpm from './run-standalone/content/rpm.mdx'
+import Win from './run-standalone/content/win.mdx'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/run-standalone/content/deb.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/run-standalone/content/deb.mdx
new file mode 100644
index 0000000000..64b8f68795
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/run-standalone/content/deb.mdx
@@ -0,0 +1,13 @@
+
+
+You must run this command as the root user because some
+integrations require root privileges to collect sensitive data.
+
+
+```shell
+sudo systemctl enable elastic-agent [^1]
+sudo systemctl start elastic-agent
+```
+[^1]: The DEB package includes a service unit for Linux systems with systemd. On
+these systems, you can manage ((agent)) by using the usual systemd commands. If
+you don't have systemd, run `sudo service elastic-agent start`.
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/run-standalone/content/linux.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/run-standalone/content/linux.mdx
new file mode 100644
index 0000000000..ca3cb05e7c
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/run-standalone/content/linux.mdx
@@ -0,0 +1,9 @@
+
+
+You must run this command as the root user because some
+integrations require root privileges to collect sensitive data.
+
+
+```shell
+sudo ./elastic-agent install
+```
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/run-standalone/content/mac.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/run-standalone/content/mac.mdx
new file mode 100644
index 0000000000..ca3cb05e7c
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/run-standalone/content/mac.mdx
@@ -0,0 +1,9 @@
+
+
+You must run this command as the root user because some
+integrations require root privileges to collect sensitive data.
+
+
+```shell
+sudo ./elastic-agent install
+```
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/run-standalone/content/rpm.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/run-standalone/content/rpm.mdx
new file mode 100644
index 0000000000..2f802a5d2d
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/run-standalone/content/rpm.mdx
@@ -0,0 +1,13 @@
+
+
+You must run this command as the root user because some
+integrations require root privileges to collect sensitive data.
+
+
+```shell
+sudo systemctl enable elastic-agent [^1]
+sudo systemctl start elastic-agent
+```
+[^1]: The RPM package includes a service unit for Linux systems with systemd. On
+these systems, you can manage ((agent)) by using the usual systemd commands. If
+you don't have systemd, run `sudo service elastic-agent start`.
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/run-standalone/content/win.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/run-standalone/content/win.mdx
new file mode 100644
index 0000000000..b824e4e15a
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/run-standalone/content/win.mdx
@@ -0,0 +1,11 @@
+
+
+Open a PowerShell prompt as an Administrator (right-click the PowerShell icon
+and select **Run As Administrator**).
+
+From the PowerShell prompt, change to the directory where you installed ((agent)),
+and run:
+
+```shell
+.\elastic-agent.exe install
+```
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/start-widget.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/start-widget.mdx
new file mode 100644
index 0000000000..2d2883c17e
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/start-widget.mdx
@@ -0,0 +1,24 @@
+
+import Deb from './start/deb.mdx'
+import Linux from './start/linux.mdx'
+import Mac from './start/mac.mdx'
+import Rpm from './start/rpm.mdx'
+import Win from './start/win.mdx'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/start/deb.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/start/deb.mdx
new file mode 100644
index 0000000000..802c7dfa95
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/start/deb.mdx
@@ -0,0 +1,19 @@
+
+
+
+The DEB package includes a service unit for Linux systems with systemd. On these
+systems, you can manage ((agent)) by using the usual systemd commands.
+
+{/* tag::start-command[] */}
+Use `systemctl` to start the agent:
+
+```shell
+sudo systemctl start elastic-agent
+```
+
+Otherwise, use:
+
+```shell
+sudo service elastic-agent start
+```
+{/* end::start-command[] */}
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/start/linux.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/start/linux.mdx
new file mode 100644
index 0000000000..83f9e5b1d7
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/start/linux.mdx
@@ -0,0 +1,5 @@
+
+
+```shell
+sudo service elastic-agent start
+```
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/start/mac.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/start/mac.mdx
new file mode 100644
index 0000000000..bd25182a0f
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/start/mac.mdx
@@ -0,0 +1,5 @@
+
+
+```shell
+sudo launchctl load /Library/LaunchDaemons/co.elastic.elastic-agent.plist
+```
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/start/rpm.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/start/rpm.mdx
new file mode 100644
index 0000000000..ba66930736
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/start/rpm.mdx
@@ -0,0 +1,18 @@
+
+
+The RPM package includes a service unit for Linux systems with systemd. On these
+systems, you can manage ((agent)) by using the usual systemd commands.
+
+{/* tag::start-command[] */}
+Use `systemctl` to start the agent:
+
+```shell
+sudo systemctl start elastic-agent
+```
+
+Otherwise, use:
+
+```shell
+sudo service elastic-agent start
+```
+{/* end::start-command[] */}
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/start/win.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/start/win.mdx
new file mode 100644
index 0000000000..f036e6eaf3
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/start/win.mdx
@@ -0,0 +1,5 @@
+
+
+```powershell
+Start-Service "Elastic Agent"
+```
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/stop-widget.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/stop-widget.mdx
new file mode 100644
index 0000000000..7d360db28f
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/stop-widget.mdx
@@ -0,0 +1,24 @@
+
+import Deb from './stop/deb.mdx'
+import Linux from './stop/linux.mdx'
+import Mac from './stop/mac.mdx'
+import Rpm from './stop/rpm.mdx'
+import Win from './stop/win.mdx'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/stop/deb.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/stop/deb.mdx
new file mode 100644
index 0000000000..8615ba363c
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/stop/deb.mdx
@@ -0,0 +1,24 @@
+
+
+
+The DEB package includes a service unit for Linux systems with systemd. On these
+systems, you can manage ((agent)) by using the usual systemd commands.
+
+{/* tag::stop-command[] */}
+Use `systemctl` to stop the agent:
+
+```shell
+sudo systemctl stop elastic-agent
+```
+
+Otherwise, use:
+
+```shell
+sudo service elastic-agent stop
+```
+
+
+((agent)) will restart automatically if the system is rebooted.
+
+
+{/* end::stop-command[] */}
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/stop/linux.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/stop/linux.mdx
new file mode 100644
index 0000000000..1d4bf8982d
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/stop/linux.mdx
@@ -0,0 +1,10 @@
+
+
+```shell
+sudo service elastic-agent stop
+```
+
+
+((agent)) will restart automatically if the system is rebooted.
+
+
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/stop/mac.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/stop/mac.mdx
new file mode 100644
index 0000000000..5a6b734fc6
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/stop/mac.mdx
@@ -0,0 +1,10 @@
+
+
+```shell
+sudo launchctl unload /Library/LaunchDaemons/co.elastic.elastic-agent.plist
+```
+
+
+((agent)) will restart automatically if the system is rebooted.
+
+
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/stop/rpm.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/stop/rpm.mdx
new file mode 100644
index 0000000000..5c87b4d5f5
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/stop/rpm.mdx
@@ -0,0 +1,23 @@
+
+
+The RPM package includes a service unit for Linux systems with systemd. On these
+systems, you can manage ((agent)) by using the usual systemd commands.
+
+{/* tag::stop-command[] */}
+Use `systemctl` to stop the agent:
+
+```shell
+sudo systemctl stop elastic-agent
+```
+
+Otherwise, use:
+
+```shell
+sudo service elastic-agent stop
+```
+
+
+((agent)) will restart automatically if the system is rebooted.
+
+
+{/* end::stop-command[] */}
diff --git a/docs/en/serverless/transclusion/fleet/tab-widgets/stop/win.mdx b/docs/en/serverless/transclusion/fleet/tab-widgets/stop/win.mdx
new file mode 100644
index 0000000000..44725fa737
--- /dev/null
+++ b/docs/en/serverless/transclusion/fleet/tab-widgets/stop/win.mdx
@@ -0,0 +1,13 @@
+
+
+```powershell
+Stop-Service "Elastic Agent"
+```
+
+If necessary, use Task Manager on Windows to stop ((agent)). This will kill the
+`elastic-agent` process and any sub-processes it created (such as ((beats))).
+
+
+((agent)) will restart automatically if the system is rebooted.
+
+
diff --git a/docs/en/serverless/transclusion/host-details.mdx b/docs/en/serverless/transclusion/host-details.mdx
new file mode 100644
index 0000000000..1544498e4b
--- /dev/null
+++ b/docs/en/serverless/transclusion/host-details.mdx
@@ -0,0 +1,92 @@
+{/* This is collapsed by default */}
+
+
+
+![Host metrics](../images/metrics-overlay.png)
+
+The **Overview** tab displays metrics about the selected host, including CPU usage,
+normalized load, memory usage, disk usage, network traffic, and the log rate.
+
+Change the time range to view metrics over a specific period of time.
+
+Hover over a specific time period on a chart to compare the various metrics at that given time.
+
+Expand the **Alerts** section to see alerts related to the selected host.
+
+
+
+
+
+![Host metadata](../images/metadata-overlay.png)
+
+The **Metadata** tab lists all the meta information relating to the host,
+including host, cloud, and agent information.
+
+This information can help when investigating events—for example,
+when filtering by operating system or architecture.
+
+
+
+
+
+![Host processes](../images/processes-overlay.png)
+
+The **Processes** tab lists the total number of processes (`system.process.summary.total`) running on the host,
+along with the total number of processes in these various states:
+
+* Running (`system.process.summary.running`)
+* Sleeping (`system.process.summary.sleeping`)
+* Stopped (`system.process.summary.stopped`)
+* Idle (`system.process.summary.idle`)
+* Dead (`system.process.summary.dead`)
+* Zombie (`system.process.summary.zombie`)
+* Unknown (`system.process.summary.unknown`)
+
+The processes listed in the **Top processes** table are based on an aggregation of the top CPU-consuming and top memory-consuming processes.
+The number of top processes is controlled by `process.include_top_n.by_cpu` and `process.include_top_n.by_memory`.
+
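+As an illustrative sketch only (where these settings live depends on how you collect system metrics), a Metricbeat-style system module configuration might set them like this:
+
+```yaml
+- module: system
+  metricsets: [process, process_summary]
+  process.include_top_n:
+    by_cpu: 5      # report the top 5 processes by CPU usage
+    by_memory: 5   # report the top 5 processes by memory usage
+```
+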
+| | |
+|---|---|
+| **Command** | Full command line that started the process, including the absolute path to the executable, and all the arguments (`system.process.cmdline`). |
+| **PID** | Process id (`process.pid`). |
+| **User** | User name (`user.name`). |
+| **CPU** | The percentage of CPU time spent by the process since the last event (`system.process.cpu.total.pct`). |
+| **Time** | The time the process started (`system.process.cpu.start_time`). |
+| **Memory** | The percentage of memory (`system.process.memory.rss.pct`) the process occupied in main memory (RAM). |
+| **State** | The current state of the process and the total number of processes (`system.process.state`). Expected values are: `running`, `sleeping`, `dead`, `stopped`, `idle`, `zombie`, and `unknown`. |
+
+
+
+
+
+![Host logs](../images/logs-overlay.png)
+
+The **Logs** tab displays logs relating to the host that you have selected. By default, the logs tab displays the following columns.
+
+| | |
+|---|---|
+| **Timestamp** | The timestamp of the log entry from the `timestamp` field. |
+| **Message** | The message extracted from the document. The content of this field depends on the type of log message. If no special log message type is detected, the [Elastic Common Schema (ECS)](((ecs-ref))/ecs-base.html) base field, `message`, is used. |
+
+To view the logs in the ((logs-app)) for a detailed analysis, click **Open in Logs**.
+
+
+
+
+
+![Anomalies](../images/anomalies-overlay.png)
+
+The **Anomalies** table displays a list of each single metric ((anomaly-detect)) job for the specific host. By default, anomaly
+jobs are sorted by time, showing the most recent jobs first.
+
+Along with the name of each anomaly job, detected anomalies with a severity score equal to 50 or higher are listed. These
+scores represent a severity of "warning" or higher in the selected time period. The **summary** value represents the increase between
+the actual value and the expected ("typical") value of the host metric in the anomaly record result.
+
+To drill down and analyze the metric anomaly, select **Actions** → **Open in Anomaly Explorer**.
+You can also select **Actions** → **Show in Inventory** to view the host Inventory page, filtered by the specific metric.
+
+
+
+{/* TODO: Find out if OSQuery tab will be included in serverless. It does not currently appear in serverless builds */}
+
diff --git a/docs/en/serverless/transclusion/kibana/apm/service-overview/dependencies.mdx b/docs/en/serverless/transclusion/kibana/apm/service-overview/dependencies.mdx
new file mode 100644
index 0000000000..f841ed1a6c
--- /dev/null
+++ b/docs/en/serverless/transclusion/kibana/apm/service-overview/dependencies.mdx
@@ -0,0 +1,9 @@
+
+
+The **Dependencies** table displays a list of downstream services or external connections relevant
+to the service at the selected time range. The table displays latency, throughput, failed transaction rate, and the impact of
+each dependency. By default, dependencies are sorted by _Impact_ to show the most used and the slowest dependency.
+If there is a particular dependency you are interested in, click **View dependencies** to learn more about it.
+
+{/* TODO: FIX THIS IMAGE
+![Dependencies view in the Applications UI](../../../../images/dependencies/spans-dependencies.png) */}
diff --git a/docs/en/serverless/transclusion/kibana/apm/service-overview/ftr.mdx b/docs/en/serverless/transclusion/kibana/apm/service-overview/ftr.mdx
new file mode 100644
index 0000000000..e137f47f3f
--- /dev/null
+++ b/docs/en/serverless/transclusion/kibana/apm/service-overview/ftr.mdx
@@ -0,0 +1,16 @@
+
+
+The failed transaction rate represents the percentage of failed transactions from the perspective of the selected service.
+It's useful for visualizing unexpected increases, decreases, or irregular patterns in a service's transactions.
+
+
+
+HTTP **transactions** from the HTTP server perspective do not consider a `4xx` status code (client error) as a failure
+because the failure was caused by the caller, not the HTTP server. Thus, `event.outcome` is set to `success` and the failed transaction rate does not increase.
+
+HTTP **spans** from the client perspective, however, are considered failures if the HTTP status code is ≥ 400.
+These spans set `event.outcome=failure` and increase the failed transaction rate.
+
+If there is no HTTP status, both transactions and spans are considered successful unless an error is reported.
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/kibana/apm/service-overview/throughput-transactions.mdx b/docs/en/serverless/transclusion/kibana/apm/service-overview/throughput-transactions.mdx
new file mode 100644
index 0000000000..07d79cf2dc
--- /dev/null
+++ b/docs/en/serverless/transclusion/kibana/apm/service-overview/throughput-transactions.mdx
@@ -0,0 +1,14 @@
+
+
+The **Throughput** chart visualizes the average number of transactions per minute for the selected service.
+
+The **Transactions** table displays a list of _transaction groups_ for the
+selected service and includes the latency, traffic, error rate, and the impact for each transaction.
+Transactions that share the same name are grouped, and only one entry is displayed for each group.
+
+By default, transaction groups are sorted by _Impact_ to show the most used and slowest endpoints in your
+service. If there is a particular endpoint you are interested in, click **View transactions** to view a
+list of similar transactions on the transactions overview page.
+
+{/* TODO: Figure out this image
+![Traffic and transactions](../../../../images/services/traffic-transactions.png) */}
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/kibana/logs/log-overview.mdx b/docs/en/serverless/transclusion/kibana/logs/log-overview.mdx
new file mode 100644
index 0000000000..17edc1fd1f
--- /dev/null
+++ b/docs/en/serverless/transclusion/kibana/logs/log-overview.mdx
@@ -0,0 +1,8 @@
+
+
+Logs provide detailed information about specific events, and are crucial to successfully debugging slow or erroneous transactions.
+
+If you've correlated your application's logs and traces, you never have to search for relevant data; it's already available to you. Viewing log and trace data together allows you to quickly diagnose and solve problems.
+
+To learn how to correlate your logs with your instrumented services,
+refer to .
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/observability/application-logs/apm-agent-log-sending.mdx b/docs/en/serverless/transclusion/observability/application-logs/apm-agent-log-sending.mdx
new file mode 100644
index 0000000000..1545c47fbf
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/application-logs/apm-agent-log-sending.mdx
@@ -0,0 +1,25 @@
+
+
+Elastic APM agents can automatically capture and send logs directly to the managed intake service — enabling you to
+easily ingest log events without needing a separate log shipper like ((filebeat)) or ((agent)).
+
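+For example, with the Java agent this behavior is controlled by an experimental configuration option. The following is a sketch only; the `log_sending` option name and property syntax are assumptions to verify against the Java agent's configuration reference:
+
+```sh
+# Sketch: enable direct log sending (experimental) in the Java agent
+java -javaagent:/path/to/elastic-apm-agent.jar \
+  -Delastic.apm.service_name=my-service \
+  -Delastic.apm.log_sending=true \
+  -jar my-service.jar
+```
+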
+**Supported APM agents/languages**
+
+* Java
+
+**Requirements**
+
+The Elastic APM agent for Java.
+
+**Pros**
+
+* Simple to set up as it only relies on the APM agent.
+* No modification of the application required.
+* No need to deploy ((filebeat)).
+* No need to store log files in the file system.
+
+**Cons**
+
+* Experimental feature.
+* Limited APM agent support.
+* Not resilient to outages. Log messages can be dropped when buffered in the agent or in the managed intake service.
diff --git a/docs/en/serverless/transclusion/observability/application-logs/correlate-logs.mdx b/docs/en/serverless/transclusion/observability/application-logs/correlate-logs.mdx
new file mode 100644
index 0000000000..46eac9ed79
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/application-logs/correlate-logs.mdx
@@ -0,0 +1,15 @@
+
+Correlate your application logs with trace events to:
+
+* view the context of a log and the parameters provided by a user
+* view all logs belonging to a particular trace
+* easily move between logs and traces when debugging application issues
+
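+When correlation is in place, each ECS-formatted log line carries the IDs of the active trace and transaction. An illustrative example (field values are made up):
+
+```json
+{
+  "@timestamp": "2024-05-01T12:00:00.000Z",
+  "log.level": "error",
+  "message": "Failed to process upload",
+  "service.name": "uploader-service",
+  "trace.id": "4bf92f3577b34da6a3ce929d0e0e4736",
+  "transaction.id": "00f067aa0ba902b7"
+}
+```
+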
+Learn more about log correlation in the agent-specific ingestion guides:
+
+* [Go](((apm-go-ref))/logs.html)
+* [Java](((apm-java-ref))/logs.html#log-correlation-ids)
+* [.NET](((apm-dotnet-ref))/log-correlation.html)
+* [Node.js](((apm-node-ref))/log-correlation.html)
+* [Python](((apm-py-ref))/logs.html#log-correlation-ids)
+* [Ruby](((apm-ruby-ref))/log-correlation.html)
diff --git a/docs/en/serverless/transclusion/observability/application-logs/ecs-logging-logs.mdx b/docs/en/serverless/transclusion/observability/application-logs/ecs-logging-logs.mdx
new file mode 100644
index 0000000000..7c630454ed
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/application-logs/ecs-logging-logs.mdx
@@ -0,0 +1,24 @@
+
+
+Elastic Common Schema (ECS) loggers format your logs into ECS-compatible JSON,
+removing the need to manually parse logs.
+
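+For example, a minimal sketch using the Python ECS logger from the `ecs-logging` package (adapt the logger name and handler to your application):
+
+```python
+import logging
+
+import ecs_logging
+
+logger = logging.getLogger("app")
+handler = logging.StreamHandler()
+# Format every record as an ECS-compatible JSON document
+handler.setFormatter(ecs_logging.StdlibFormatter())
+logger.addHandler(handler)
+logger.setLevel(logging.INFO)
+
+logger.info("hello world", extra={"http.request.method": "GET"})
+```
+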
+**Requirements**
+
+* (Optional) Elastic APM agent for your programming language (for log correlation)
+* The Elastic ECS logger for your language or framework
+* ((filebeat)) configured to monitor and capture application logs
+
+**Pros**
+
+* Popular logging frameworks supported
+* Simplicity: no manual parsing with ((filebeat)), and a configuration can be reused across applications
+* Decently human-readable JSON structure
+* APM log correlation
+* Resilient in case of outages
+
+**Cons**
+
+* Not all frameworks are supported
+* Requires modification of the application and its log configuration
+
diff --git a/docs/en/serverless/transclusion/observability/application-logs/log-reformatting.mdx b/docs/en/serverless/transclusion/observability/application-logs/log-reformatting.mdx
new file mode 100644
index 0000000000..5061a7431c
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/application-logs/log-reformatting.mdx
@@ -0,0 +1,29 @@
+
+
+Elastic APM agents can automatically reformat application logs to Elastic Common Schema (ECS) format
+without needing to add an ECS logger dependency or modify the application.
+
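+For example, the Java agent exposes this through a configuration option. The following is a sketch only; verify the `log_ecs_reformatting` option name and its accepted values against the Java agent's configuration reference:
+
+```sh
+# Sketch: have the Java agent reformat application logs to ECS JSON
+java -javaagent:/path/to/elastic-apm-agent.jar \
+  -Delastic.apm.log_ecs_reformatting=override \
+  -jar my-service.jar
+```
+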
+**Requirements**
+
+* The Elastic APM agent for your programming language
+* ((filebeat)) configured to monitor and capture application logs
+
+**Pros**
+
+All the benefits of using ECS logging, without having to modify the application or its configuration:
+
+* Simplicity: no manual parsing with ((filebeat)), and a configuration can be reused across applications
+* Decently human-readable JSON structure
+* APM log correlation
+
+**Cons**
+
+* Requires an Elastic APM agent
+* Not all APM agents support this feature
+
+**Supported APM agents/languages**
+
+* Ruby
+* Python
+* Java
+
diff --git a/docs/en/serverless/transclusion/observability/application-logs/plaintext-logs.mdx b/docs/en/serverless/transclusion/observability/application-logs/plaintext-logs.mdx
new file mode 100644
index 0000000000..d0aad15b40
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/application-logs/plaintext-logs.mdx
@@ -0,0 +1,22 @@
+
+
+Use ((filebeat)) to parse and ingest raw, plain-text application logs.
+
+**Requirements**
+
+* (Optional) Elastic APM agent for your programming language (for log correlation)
+* Raw, plain-text application logs stored on the file system
+* ((filebeat)) configured to monitor and capture application logs
+
+**Pros**
+
+* All programming languages/frameworks are supported
+* Existing application logs can be ingested
+* Does not require modification of the application or its configuration, unless log correlation is required
+
+**Cons**
+
+* Must parse application logs to be useful—meaning writing and maintaining Grok patterns and spending CPU cycles on parsing
+* Parsing is tied to the application log format, meaning it can differ per application and needs to be maintained over time
+* Log correlation requires modifying the application log format and injecting IDs into log messages
+
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-install/content/deb.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-install/content/deb.mdx
new file mode 100644
index 0000000000..252be7ac06
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-install/content/deb.mdx
@@ -0,0 +1,4 @@
+```sh
+curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-((version))-amd64.deb
+sudo dpkg -i filebeat-((version))-amd64.deb
+```
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-install/content/linux.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-install/content/linux.mdx
new file mode 100644
index 0000000000..f3ce2b82c3
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-install/content/linux.mdx
@@ -0,0 +1,4 @@
+```sh
+curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-((version))-linux-x86_64.tar.gz
+tar xzvf filebeat-((version))-linux-x86_64.tar.gz
+```
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-install/content/macos.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-install/content/macos.mdx
new file mode 100644
index 0000000000..1ebda0ce8c
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-install/content/macos.mdx
@@ -0,0 +1,4 @@
+```sh
+curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-((version))-darwin-x86_64.tar.gz
+tar xzvf filebeat-((version))-darwin-x86_64.tar.gz
+```
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-install/content/rpm.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-install/content/rpm.mdx
new file mode 100644
index 0000000000..e0adfff0cc
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-install/content/rpm.mdx
@@ -0,0 +1,4 @@
+```sh
+curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-((version))-x86_64.rpm
+sudo rpm -vi filebeat-((version))-x86_64.rpm
+```
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-install/content/windows.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-install/content/windows.mdx
new file mode 100644
index 0000000000..9a620b77ce
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-install/content/windows.mdx
@@ -0,0 +1,21 @@
+1. Download the ((filebeat)) Windows zip file: https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-((version))-windows-x86_64.zip
+
+1. Extract the contents of the zip file into `C:\Program Files`.
+
+1. Rename the `filebeat-((version))-windows-x86_64` directory to `((filebeat))`.
+
+1. Open a PowerShell prompt as an Administrator (right-click the PowerShell icon
+and select **Run As Administrator**).
+
+1. From the PowerShell prompt, run the following commands to install
+((filebeat)) as a Windows service:
+
+ ```powershell
+ PS > cd 'C:\Program Files\((filebeat))'
+ PS C:\Program Files\((filebeat))> .\install-service-filebeat.ps1
+ ```
+
+If script execution is disabled on your system, you need to set the
+execution policy for the current session to allow the script to run. For
+example:
+`PowerShell.exe -ExecutionPolicy UnRestricted -File .\install-service-filebeat.ps1`.
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-install/widget.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-install/widget.mdx
new file mode 100644
index 0000000000..4e2364769c
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-install/widget.mdx
@@ -0,0 +1,23 @@
+import Deb from './content/deb.mdx'
+import Rpm from './content/rpm.mdx'
+import Mac from './content/macos.mdx'
+import Linux from './content/linux.mdx'
+import Windows from './content/windows.mdx'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-logs/content/docker.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-logs/content/docker.mdx
new file mode 100644
index 0000000000..bcdb7ad209
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-logs/content/docker.mdx
@@ -0,0 +1,24 @@
+
+
+1. Make sure your application logs to stdout/stderr.
+
+1. Follow the [Run ((filebeat)) on Docker](((filebeat-ref))/running-on-docker.html) guide.
+
+1. Enable [hints-based autodiscover](((filebeat-ref))/configuration-autodiscover-hints.html).
+
+
+4. Add these labels to your containers that log using ECS-compatible JSON. This will make sure the logs are parsed appropriately. In `docker-compose.yml`:
+
+```yaml
+labels:
+ co.elastic.logs/json.overwrite_keys: true [^1]
+ co.elastic.logs/json.add_error_key: true [^2]
+ co.elastic.logs/json.expand_keys: true [^3]
+```
+[^1]: Values from the decoded JSON object overwrite the fields that ((filebeat)) normally adds (type, source, offset, etc.) in case of conflicts.
+[^2]: ((filebeat)) adds an "error.message" and "error.type: json" key in case of JSON unmarshalling errors.
+[^3]: ((filebeat)) will recursively de-dot keys in the decoded JSON, and expand them into a hierarchical object structure.
+
+
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-logs/content/kubernetes.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-logs/content/kubernetes.mdx
new file mode 100644
index 0000000000..529bddf42c
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-logs/content/kubernetes.mdx
@@ -0,0 +1,26 @@
+
+
+1. Make sure your application logs to stdout/stderr.
+
+1. Follow the [Run ((filebeat)) on Kubernetes](((filebeat-ref))/running-on-kubernetes.html) guide.
+
+1. Enable [hints-based autodiscover](((filebeat-ref))/configuration-autodiscover-hints.html) (uncomment the corresponding section in `filebeat-kubernetes.yaml`).
+
+
+
+4. Add these annotations to your pods that log using ECS-compatible JSON. This will make sure the logs are parsed appropriately.
+
+ ```yaml
+ annotations:
+ co.elastic.logs/json.overwrite_keys: true [^1]
+ co.elastic.logs/json.add_error_key: true [^2]
+ co.elastic.logs/json.expand_keys: true [^3]
+ ```
+ [^1]: Values from the decoded JSON object overwrite the fields that ((filebeat)) normally adds (type, source, offset, etc.) in case of conflicts.
+ [^2]: ((filebeat)) adds an "error.message" and "error.type: json" key in case of JSON unmarshalling errors.
+ [^3]: ((filebeat)) will recursively de-dot keys in the decoded JSON, and expand them into a hierarchical object structure.
+
+
+
+
+
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-logs/content/logs.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-logs/content/logs.mdx
new file mode 100644
index 0000000000..1216e00539
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-logs/content/logs.mdx
@@ -0,0 +1,48 @@
+
+1. Follow the [Filebeat quick start](((filebeat-ref))/filebeat-installation-configuration.html) to learn how to
+ install ((filebeat)) and connect to Elastic.
+
+
+1. Add the following configuration to your `filebeat.yaml` file to start collecting log data.
+
+```yaml
+filebeat.inputs:
+- type: filestream [^1]
+ paths: /path/to/logs.json
+ parsers:
+ - ndjson:
+ overwrite_keys: true [^2]
+ add_error_key: true [^3]
+ expand_keys: true [^4]
+ fields:
+ service.name: your_service_name [^5]
+ service.version: your_service_version [^5]
+ service.environment: your_service_environment [^5]
+
+processors: [^6]
+ - add_host_metadata: ~
+ - add_cloud_metadata: ~
+ - add_docker_metadata: ~
+ - add_kubernetes_metadata: ~
+```
+[^1]: Use the filestream input to read lines from active log files.
+[^2]: Values from the decoded JSON object overwrite the fields that ((filebeat)) normally adds (type, source, offset, etc.) in case of conflicts.
+[^3]: ((filebeat)) adds an "error.message" and "error.type: json" key in case of JSON unmarshalling errors.
+[^4]: ((filebeat)) will recursively de-dot keys in the decoded JSON, and expand them into a hierarchical object structure.
+[^5]: The `service.name` (required), `service.version` (optional) and `service.environment` (optional) of the service you're collecting logs from, used for Log correlation.
+[^6]: Processors enhance your data. See [processors](((filebeat-ref))/filtering-and-enhancing-data.html) to learn more.
+
+
+
+2. Add the following configuration to your `filebeat.yaml` file to start collecting log data.
+
+```yaml
+filebeat.inputs:
+- type: filestream [^1]
+ paths: /path/to/logs.log [^2]
+```
+[^1]: Reads lines from an active log file.
+[^2]: A list of glob-based paths that will be crawled and fetched.
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-logs/widget.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-logs/widget.mdx
new file mode 100644
index 0000000000..0ac6a99ace
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-logs/widget.mdx
@@ -0,0 +1,25 @@
+
+import ContentLogs from './content/logs.mdx'
+import ContentKubernetes from './content/kubernetes.mdx'
+import ContentDocker from './content/docker.mdx'
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-setup/content/deb.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-setup/content/deb.mdx
new file mode 100644
index 0000000000..c2697bf5bc
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-setup/content/deb.mdx
@@ -0,0 +1,3 @@
+```shell
+filebeat setup --index-management
+```
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-setup/content/linux.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-setup/content/linux.mdx
new file mode 100644
index 0000000000..23e3f86a0d
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-setup/content/linux.mdx
@@ -0,0 +1,3 @@
+```shell
+./filebeat setup --index-management
+```
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-setup/content/macos.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-setup/content/macos.mdx
new file mode 100644
index 0000000000..23e3f86a0d
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-setup/content/macos.mdx
@@ -0,0 +1,3 @@
+```shell
+./filebeat setup --index-management
+```
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-setup/content/rpm.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-setup/content/rpm.mdx
new file mode 100644
index 0000000000..b641d78823
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-setup/content/rpm.mdx
@@ -0,0 +1,3 @@
+```sh
+filebeat setup --index-management
+```
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-setup/content/windows.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-setup/content/windows.mdx
new file mode 100644
index 0000000000..9d773278a5
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-setup/content/windows.mdx
@@ -0,0 +1,3 @@
+```powershell
+PS > .\filebeat.exe setup --index-management
+```
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-setup/widget.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-setup/widget.mdx
new file mode 100644
index 0000000000..4e2364769c
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-setup/widget.mdx
@@ -0,0 +1,23 @@
+import Deb from './content/deb.mdx'
+import Rpm from './content/rpm.mdx'
+import Mac from './content/macos.mdx'
+import Linux from './content/linux.mdx'
+import Windows from './content/windows.mdx'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-start/content/deb.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-start/content/deb.mdx
new file mode 100644
index 0000000000..5ca29998ea
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-start/content/deb.mdx
@@ -0,0 +1,9 @@
+```shell
+sudo service filebeat start
+```
+
+
+If you use an init.d script to start ((filebeat)), you can't specify command line flags (refer to [Command reference](((filebeat-ref))/command-line-options.html)). To specify flags, start ((filebeat)) in the foreground.
+
+
+Also, refer to [((filebeat)) and systemd](((filebeat-ref))/running-with-systemd.html).
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-start/content/linux.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-start/content/linux.mdx
new file mode 100644
index 0000000000..fd3ea6e2f9
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-start/content/linux.mdx
@@ -0,0 +1,8 @@
+```shell
+sudo chown root filebeat.yml
+sudo ./filebeat -e
+```
+
+
+You'll be running ((filebeat)) as root, so you need to change ownership of the configuration file and any configurations enabled in the `modules.d` directory, or run ((filebeat)) with `--strict.perms=false` specified. Refer to [Config file ownership and permissions](((beats-ref))/config-file-permissions.html).
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-start/content/macos.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-start/content/macos.mdx
new file mode 100644
index 0000000000..fd3ea6e2f9
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-start/content/macos.mdx
@@ -0,0 +1,8 @@
+```shell
+sudo chown root filebeat.yml
+sudo ./filebeat -e
+```
+
+
+You'll be running ((filebeat)) as root, so you need to change ownership of the configuration file and any configurations enabled in the `modules.d` directory, or run ((filebeat)) with `--strict.perms=false` specified. Refer to [Config file ownership and permissions](((beats-ref))/config-file-permissions.html).
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-start/content/rpm.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-start/content/rpm.mdx
new file mode 100644
index 0000000000..5ca29998ea
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-start/content/rpm.mdx
@@ -0,0 +1,9 @@
+```shell
+sudo service filebeat start
+```
+
+
+If you use an init.d script to start ((filebeat)), you can't specify command line flags (refer to [Command reference](((filebeat-ref))/command-line-options.html)). To specify flags, start ((filebeat)) in the foreground.
+
+
+Also, refer to [((filebeat)) and systemd](((filebeat-ref))/running-with-systemd.html).
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-start/content/windows.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-start/content/windows.mdx
new file mode 100644
index 0000000000..b81ff66451
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-start/content/windows.mdx
@@ -0,0 +1,5 @@
+```powershell
+PS C:\Program Files\filebeat> Start-Service filebeat
+```
+
+By default, Windows log files are stored in `C:\ProgramData\filebeat\Logs`.
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-start/widget.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-start/widget.mdx
new file mode 100644
index 0000000000..4e2364769c
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/filebeat-start/widget.mdx
@@ -0,0 +1,23 @@
+import Deb from './content/deb.mdx'
+import Rpm from './content/rpm.mdx'
+import Mac from './content/macos.mdx'
+import Linux from './content/linux.mdx'
+import Windows from './content/windows.mdx'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/logs/agent-location/content/deb.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/logs/agent-location/content/deb.mdx
new file mode 100644
index 0000000000..49020a1349
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/logs/agent-location/content/deb.mdx
@@ -0,0 +1,6 @@
+
+
+
+Main ((agent)) configuration file location:
+
+`/etc/elastic-agent/elastic-agent.yml`
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/logs/agent-location/content/linux.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/logs/agent-location/content/linux.mdx
new file mode 100644
index 0000000000..1bae2d4fd6
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/logs/agent-location/content/linux.mdx
@@ -0,0 +1,6 @@
+
+
+
+Main ((agent)) configuration file location:
+
+`/opt/Elastic/Agent/elastic-agent.yml`
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/logs/agent-location/content/mac.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/logs/agent-location/content/mac.mdx
new file mode 100644
index 0000000000..57fb0a7705
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/logs/agent-location/content/mac.mdx
@@ -0,0 +1,7 @@
+
+
+
+{/* lint disable */}
+Main ((agent)) configuration file location:
+
+`/Library/Elastic/Agent/elastic-agent.yml`
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/logs/agent-location/content/rpm.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/logs/agent-location/content/rpm.mdx
new file mode 100644
index 0000000000..49020a1349
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/logs/agent-location/content/rpm.mdx
@@ -0,0 +1,6 @@
+
+
+
+Main ((agent)) configuration file location:
+
+`/etc/elastic-agent/elastic-agent.yml`
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/logs/agent-location/content/win.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/logs/agent-location/content/win.mdx
new file mode 100644
index 0000000000..a8cb44e7c2
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/logs/agent-location/content/win.mdx
@@ -0,0 +1,6 @@
+
+
+
+Main ((agent)) configuration file location:
+
+`C:\Program Files\Elastic\Agent\elastic-agent.yml`
diff --git a/docs/en/serverless/transclusion/observability/tab-widgets/logs/agent-location/widget.mdx b/docs/en/serverless/transclusion/observability/tab-widgets/logs/agent-location/widget.mdx
new file mode 100644
index 0000000000..070e9788b3
--- /dev/null
+++ b/docs/en/serverless/transclusion/observability/tab-widgets/logs/agent-location/widget.mdx
@@ -0,0 +1,24 @@
+
+import Mac from './content/mac.mdx'
+import Linux from './content/linux.mdx'
+import Win from './content/win.mdx'
+import Deb from './content/deb.mdx'
+import Rpm from './content/rpm.mdx'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/support.mdx b/docs/en/serverless/transclusion/support.mdx
new file mode 100644
index 0000000000..b4c88139c5
--- /dev/null
+++ b/docs/en/serverless/transclusion/support.mdx
@@ -0,0 +1,3 @@
+We offer a support experience unlike any other.
+Our team of professionals 'speak human and code' and love making your day.
+[Learn more about subscriptions](https://www.elastic.co/subscriptions).
diff --git a/docs/en/serverless/transclusion/synthetics/configuration/monitor-config-options.mdx b/docs/en/serverless/transclusion/synthetics/configuration/monitor-config-options.mdx
new file mode 100644
index 0000000000..b6ba4d6b61
--- /dev/null
+++ b/docs/en/serverless/transclusion/synthetics/configuration/monitor-config-options.mdx
@@ -0,0 +1,79 @@
+
+
+
+ `id` (`string`)
+
+ A unique identifier for this monitor.
+
+
+ `name` (`string`)
+
+ A human-readable name for the monitor.
+
+
+ `tags` (`Array`)
+
+ A list of tags that will be sent with the monitor event. Tags are displayed in the Synthetics UI and allow you to search monitors by tag.
+
+
+ `schedule` (`number`)
+
+ The interval (in minutes) at which the monitor should run.
+
+
+ `enabled` (`boolean`)
+
+ Enable or disable the monitor from running without deleting and recreating it.
+
+
+ `locations` ([`Array`](https://github.com/elastic/synthetics/blob/((synthetics_version))/src/locations/public-locations.ts#L28-L37))
+
+ Where to deploy the monitor. Monitors can be deployed in multiple locations so that you can detect differences in availability and response times across those locations.
+
+ To list available locations you can:
+
+ * Run the `elastic-synthetics locations` command.
+ * Go to **Synthetics** → **Management** and click **Create monitor**.
+ Locations will be listed in _Locations_.
+
+
+ `privateLocations` (`Array`)
+
+ The ((private-location))s to which the monitors will be deployed. These ((private-location))s refer to locations hosted and managed by you, whereas
+ `locations` are hosted by Elastic. You can specify a ((private-location)) using the location's name.
+
+ To list available ((private-location))s you can:
+
+ * Run the `elastic-synthetics locations` command
+ with the URL for the Observability project from which to fetch available locations.
+ * Go to **Synthetics** → **Management** and click **Create monitor**.
+ ((private-location))s will be listed in _Locations_.
+
+
+ `throttling` (`boolean` | [`ThrottlingOptions`](https://github.com/elastic/synthetics/blob/((synthetics_version))/src/common_types.ts#L194-L198))
+
+ Control the monitor's download speeds, upload speeds, and latency to simulate your application's behavior on slower or laggier networks. Set to `false` to disable throttling altogether.
+
+
+ `screenshot` ([`ScreenshotOptions`](https://github.com/elastic/synthetics/blob/((synthetics_version))/src/common_types.ts#L192))
+
+ Control whether or not to capture screenshots. Options include `'on'`, `'off'`, or `'only-on-failure'`.
+
+
+ `alert.status.enabled` (`boolean`)
+
+ Enable or disable monitor status alerts. Read more about alerts in Alerting.
+
+
+ `retestOnFailure` (`boolean`)
+
+ Enable or disable retesting when a monitor fails. Default is `true`.
+
+ By default, monitors are automatically retested if the monitor goes from "up" to "down".
+ If the result of the retest is also "down", an error will be created, and if configured, an alert sent.
+ Then the monitor will resume running according to the defined schedule.
+
+ Using `retestOnFailure` can reduce noise related to transient problems.
+
+
+
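+These options are typically set from a browser monitor's journey code. A minimal sketch using the `@elastic/synthetics` package (the journey name, URL, and option values are illustrative):
+
+```ts
+import { journey, monitor, step } from '@elastic/synthetics';
+
+journey('Example journey', ({ page }) => {
+  // Configure how this monitor is created when the journey is pushed
+  monitor.use({
+    id: 'example-monitor',
+    schedule: 10,
+    tags: ['team-observability'],
+    screenshot: 'only-on-failure',
+  });
+
+  step('load the home page', async () => {
+    await page.goto('https://www.elastic.co');
+  });
+});
+```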
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/synthetics/global-managed-paid-for.mdx b/docs/en/serverless/transclusion/synthetics/global-managed-paid-for.mdx
new file mode 100644
index 0000000000..e3e131276b
--- /dev/null
+++ b/docs/en/serverless/transclusion/synthetics/global-managed-paid-for.mdx
@@ -0,0 +1,2 @@
+Executing synthetic tests on Elastic's global managed testing infrastructure incurs an additional charge. Tests are charged under one of two new billing dimensions depending on the monitor type. For _browser monitor_ usage, there is a fee per test run. For _lightweight monitor_ usage, there is a fee per region in which you run any monitors regardless of the number of test runs.
+{/* For more details, refer to [full details and current pricing](https://www.elastic.co/pricing). */}
diff --git a/docs/en/serverless/transclusion/synthetics/reference/lightweight-config/common.mdx b/docs/en/serverless/transclusion/synthetics/reference/lightweight-config/common.mdx
new file mode 100644
index 0000000000..c3d2c0c4ec
--- /dev/null
+++ b/docs/en/serverless/transclusion/synthetics/reference/lightweight-config/common.mdx
@@ -0,0 +1,325 @@
+
+
+
+
+ **`type`** (`"http"`, `"icmp"`, or `"tcp"`)
+
+
+ **Required**. The type of monitor to run. One of:
+
+ * `http`: Connects via HTTP and optionally verifies that the host returns the expected response.
+ * `icmp`: Uses an ICMP (v4 and v6) Echo Request to ping the configured hosts. Requires special permissions or root access.
+ * `tcp`: Connects via TCP and optionally verifies the endpoint by sending and/or receiving a custom payload.
+
+
+
+
+ **`id`**
+ (string)
+
+
+  **Required**. A unique identifier for this configuration. It should remain the same across edits to the monitor configuration, regardless of changes to other config fields.
+
+ **Examples**:
+
+ ```yaml
+ id: uploader-service
+ ```
+
+ ```yaml
+ id: http://example.net
+ ```
+
+
+  When querying indexed monitor data, this is the field you will aggregate on. It appears in the exported fields as `monitor.id`.
+
+  If you do not set an `id` explicitly, the monitor's configuration is hashed and a generated value is used. That value changes whenever any of the monitor's options change, making aggregations across those changes impossible. For this reason, it's recommended that you set the `id` manually.
+
+
+
+
+
+ **`name`**
+ (string)
+
+
+  A human-readable name for this monitor.
+
+ **Examples**:
+
+ ```yaml
+ name: Uploader service
+ ```
+
+ ```yaml
+ name: Example website
+ ```
+
+
+
+
+ **`service.name`**
+ (string)
+
+
+ APM service name for this monitor. Corresponds to the `service.name` ECS field. Set this when monitoring an app that is also using APM to enable integrations between Synthetics and APM data in your Observability project.
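+
+  **Example**:
+
+  A minimal sketch; `my-apm-service` is a hypothetical name and should match the `service.name` reported by the app's APM agent:
+
+  ```yaml
+  # Hypothetical APM service name; use the one your APM agent reports
+  service.name: my-apm-service
+  ```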
+
+
+
+
+ **`enabled`**
+ (boolean)
+
+
+ Whether the monitor is enabled.
+
+ **Default**: `true`
+
+ **Example**:
+
+ ```yaml
+ enabled: false
+ ```
+
+
+
+
+ **`schedule`**
+ (duration)
+
+
+ **Required**. The task schedule.
+
+
+  Schedules with a resolution of less than one minute are saved to the nearest minute. For example, `@every 5s` will be changed to `@every 60s` when the monitor is pushed using the CLI.
+
+
+ **Example**:
+ Run the task every 5 minutes from the time the monitor was started.
+
+ ```yaml
+  schedule: '@every 5m'
+ ```
+
+
+
+
+ **`timeout`**
+ (duration)
+
+
+ The total running time for each ping test. This is the total time allowed for testing the connection and exchanging data.
+
+ **Default**: `16s`
+
+ **Example**:
+
+ ```yaml
+ timeout: 2m
+ ```
+
+
+
+
+ **`tags`**
+ (list of strings)
+
+
+ A list of tags that will be sent with the monitor event.
+
+ **Examples**:
+
+ ```yaml
+ tags:
+ - tag one
+ - tag two
+ ```
+
+ ```yaml
+ tags: ["tag one", "tag two"]
+ ```
+
+
+
+
+ **`mode`**
+ (`"any"` \| `"all"`)
+
+
+ One of two modes in which to run the monitor:
+
+ * `any`: The monitor pings only one IP address for a hostname.
+ * `all`: The monitor pings all resolvable IPs for a hostname.
+
+ **Default**: `any`
+
+ **Example**:
+ If you're using a DNS-load balancer and want to ping every IP address for the specified hostname, you should use `all`.
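+
+  A minimal sketch of that case:
+
+  ```yaml
+  # Ping every resolvable IP for the configured hostname (for example, behind a DNS load balancer)
+  mode: all
+  ```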
+
+
+
+
+ **`ipv4`**
+ (boolean)
+
+
+  Whether to ping using the IPv4 protocol if hostnames are configured.
+
+ **Default**: `true`
+
+ **Example**:
+
+ ```yaml
+ ipv4: false
+ ```
+
+
+
+
+ **`ipv6`**
+ (boolean)
+
+
+  Whether to ping using the IPv6 protocol if hostnames are configured.
+
+ **Default**: `true`
+
+ **Example**:
+
+ ```yaml
+ ipv6: false
+ ```
+
+
+
+
+ **`alert`**
+
+
+ Enable or disable alerts on this monitor. Read more about alerts in Alerting.
+
+
+ **`status.enabled`** (boolean)
+
+ Enable monitor status alerts on this monitor.
+
+ **Default**: `true`
+
+ **Example**:
+
+ ```yaml
+ alert.status.enabled: true
+ ```
+
+ **`tls.enabled`** (boolean)
+
+ Enable TLS certificate alerts on this monitor.
+
+ **Default**: `true`
+
+ **Example**:
+
+ ```yaml
+ alert.tls.enabled: true
+ ```
+
+
+
+
+
+
+ **`retest_on_failure`**
+ (boolean)
+
+
+ Enable or disable retesting when a monitor fails. Default is `true`.
+
+  By default, monitors are automatically retested if the monitor goes from "up" to "down". If the result of the retest is also "down", an error will be created, and if configured, an alert sent. Then the monitor will resume running according to the defined schedule. Using `retest_on_failure` can reduce noise related to transient problems.
+
+ **Example**:
+
+ ```yaml
+ retest_on_failure: false
+ ```
+
+
+
+
+ **`locations`**
+ (list of [`SyntheticsLocationsType`](https://github.com/elastic/synthetics/blob/((synthetics_version))/src/locations/public-locations.ts#L28-L37))
+
+
+ Where to deploy the monitor. You can deploy monitors in multiple locations to detect differences in availability and response times across those locations.
+
+ To list available locations you can:
+
+ * Run the `elastic-synthetics locations` command.
+ * Go to **Synthetics** → **Management** and click **Create monitor**. Locations will be listed in _Locations_.
+
+ **Examples**:
+
+ ```yaml
+ locations: ["japan", "india"]
+ ```
+
+ ```yaml
+ locations:
+ - japan
+ - india
+ ```
+
+
+ This can also be set using
+ `monitor.locations` in the Synthetics project configuration file
+ or via the CLI using the `--location` flag on `push`.
+
+ The value defined via the CLI takes precedence over the value defined in the lightweight monitor configuration,
+  and the value defined in the lightweight monitor configuration takes precedence over the value defined in the Synthetics project configuration file.
+
+
+
+
+
+ **`private_locations`**
+ (list of strings)
+
+
+ The ((private-location))s to which the monitors will be deployed. These ((private-location))s refer to locations hosted and managed by you, whereas `locations` are hosted by Elastic. You can specify a ((private-location)) using the location's name.
+
+ To list available ((private-location))s you can:
+
+ * Run the `elastic-synthetics locations` command and specify the URL of the Observability project. This will fetch all available private locations associated with the deployment.
+ * Go to **Synthetics** → **Management** and click **Create monitor**. ((private-location))s will be listed in _Locations_.
+
+ **Examples**:
+
+ ```yaml
+ private_locations: ["Private Location 1", "Private Location 2"]
+ ```
+
+ ```yaml
+ private_locations:
+ - Private Location 1
+ - Private Location 2
+ ```
+
+
+ This can also be set using
+ `monitor.privateLocations` in the Synthetics project configuration file
+ or via the CLI using the `--privateLocations` flag on `push`.
+
+ The value defined via the CLI takes precedence over the value defined in the lightweight monitor configuration,
+  and the value defined in the lightweight monitor configuration takes precedence over the value defined in the Synthetics project configuration file.
+
+
+
+
diff --git a/docs/en/serverless/transclusion/synthetics/reference/lightweight-config/http.mdx b/docs/en/serverless/transclusion/synthetics/reference/lightweight-config/http.mdx
new file mode 100644
index 0000000000..6db8ba09bf
--- /dev/null
+++ b/docs/en/serverless/transclusion/synthetics/reference/lightweight-config/http.mdx
@@ -0,0 +1,301 @@
+
+
+
+
+ **`hosts`**
+ (string)
+
+
+
+ **Required**. The URL to ping.
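+
+  **Example**:
+
+  A minimal sketch with a placeholder URL:
+
+  ```yaml
+  # Replace with the endpoint you want to monitor
+  hosts: "https://example.com"
+  ```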
+
+
+
+
+ **`max_redirects`**
+ (number)
+
+
+
+ The total number of redirections Synthetics will follow.
+
+ By default, Synthetics will not follow redirects, but will report the status of the redirect. If set to a number greater than `0`, Synthetics will follow that number of redirects.
+
+  When this option is set to a value greater than `0`, the `monitor.ip` field will no longer be reported, because multiple DNS requests across multiple IPs may return multiple IPs. Fine-grained network timing data will also not be recorded, because with redirects that data would span multiple requests. Specifically, the fields `http.rtt.content.us`, `http.rtt.response_header.us`, `http.rtt.total.us`, `http.rtt.validate.us`, `http.rtt.write_request.us`, and `dns.rtt.us` will be omitted.
+
+ **Default**: `0`
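+
+  **Example**:
+
+  For instance, a configuration that follows up to three redirects might look like:
+
+  ```yaml
+  # Follow at most 3 redirects before reporting the final status
+  max_redirects: 3
+  ```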
+
+
+
+
+ **`proxy_headers`**
+
+
+
+ Additional headers to send to proxies during `CONNECT` requests.
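+
+  **Example**:
+
+  A minimal sketch; the header name and value are placeholders for whatever your proxy expects:
+
+  ```yaml
+  proxy_headers:
+    # Hypothetical credentials for a proxy that requires basic auth
+    Proxy-Authorization: "Basic dXNlcjpwYXNzd29yZA=="
+  ```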
+
+
+
+
+ **`proxy_url`**
+ (string)
+
+
+
+ The HTTP proxy URL. This setting is optional.
+
+ **Example**:
+
+
+ ```yaml
+  proxy_url: http://proxy.mydomain.com:3128
+ ```
+
+
+
+
+ **`username`**
+ (string)
+
+
+
+ The username for authenticating with the server. The credentials are passed with the request. This setting is optional.
+
+ You need to specify credentials when your `check.response` settings require it. For example, you can check for a 403 response (`check.response.status: [403]`) without setting credentials.
+
+
+
+
+ **`password`**
+ (string)
+
+
+
+ The password for authenticating with the server. This setting is optional.
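+
+  **Example**:
+
+  A minimal sketch with placeholder credentials:
+
+  ```yaml
+  # Placeholder credentials for basic authentication
+  username: monitoring-user
+  password: changeme
+  ```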
+
+
+
+
+ **`ssl`**
+ ([SSL](((heartbeat-ref))/configuration-ssl.html))
+
+
+
+ The TLS/SSL connection settings for use with the HTTPS endpoint. If you don't specify settings, the system defaults are used.
+
+ **Example**:
+
+ ```yaml
+ - type: http
+ id: my-http-service
+ name: My HTTP Service
+ hosts: "https://myhost:443"
+ schedule: '@every 5s'
+ ssl:
+ certificate_authorities: ['/etc/ca.crt']
+ supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"]
+ ```
+
+
+
+
+ **`headers`**
+ (boolean)
+
+
+
+  Controls the indexing of the HTTP response headers to the `http.response.body.headers` field. Set `response.include_headers` to `false` to disable.
+
+ **Default**: `true`
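+
+  **Example**:
+
+  For example, to turn header indexing off:
+
+  ```yaml
+  # Do not index response headers
+  response.include_headers: false
+  ```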
+
+
+
+
+ **`response`**
+
+
+
+ Controls the indexing of the HTTP response body contents to the `http.response.body.contents` field.
+
+
+
+ **`include_body`** (`"on_error"`, `"never"`, or `"always"`)
+
+
+ Set `response.include_body` to one of the following:
+
+ * `on_error`: Include the body if an error is encountered during the check. This is the default.
+ * `never`: Never include the body.
+ * `always`: Always include the body with checks.
+
+
+
+ **`include_body_max_bytes`** (number)
+
+
+ Set `response.include_body_max_bytes` to control the maximum size of the stored body contents.
+
+ **Default**: `1024`
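+
+  **Example**:
+
+  A minimal sketch that always stores the body and raises the stored size; the values are illustrative:
+
+  ```yaml
+  # Always index the response body, up to 2048 bytes
+  response.include_body: always
+  response.include_body_max_bytes: 2048
+  ```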
+
+
+
+
+
+
+ **`check`**
+
+
+
+ **`request`**
+
+ An optional `request` to send to the remote host. Under `check.request`, specify these options:
+
+ **`method`** (`"HEAD"`, `"GET"`, `"POST"`, or `"OPTIONS"`)
+
+ The HTTP method to use.
+
+
+ **`headers`** ([HTTP headers](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers))
+
+  A dictionary of additional HTTP headers to send. By default, Synthetics will set the 'User-Agent' header to identify itself.
+
+
+ **`body`** (string)
+
+ Optional request body content.
+
+
+
+ **Example**: This monitor POSTs an `x-www-form-urlencoded` string to the endpoint `/demo/add`.
+
+ ```yaml
+ check.request:
+ method: POST
+ headers:
+ 'Content-Type': 'application/x-www-form-urlencoded'
+ # urlencode the body:
+ body: "name=first&email=someemail%40someemailprovider.com"
+ ```
+
+ **`response`**
+
+ The expected `response`.
+
+ Under `check.response`, specify these options:
+
+
+ **`status`** (list of strings)
+
+ A list of expected status codes. 4xx and 5xx codes are considered `down` by default. Other codes are considered `up`.
+
+
+ **Example**:
+
+ ```yaml
+ check.response:
+ status: [200, 201]
+ ```
+
+
+ **`headers`** ([HTTP headers](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers))
+
+ The required response headers.
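+
+  **Example**:
+
+  A minimal sketch that requires a hypothetical `Content-Type` header on the response:
+
+  ```yaml
+  check.response:
+    status: [200]
+    headers:
+      # Hypothetical required header
+      Content-Type: application/json
+  ```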
+
+
+ **`body.positive`** (list of strings)
+
+ A list of regular expressions to match the body output. Only a single expression needs to match.
+
+ **Example**:
+
+ This monitor examines the response body for the strings 'foo' or 'Foo':
+
+ ```yaml
+ check.response:
+ status: [200, 201]
+ body:
+ positive:
+ - foo
+ - Foo
+ ```
+
+
+ **`body.negative`** (list of strings)
+
+  A list of regular expressions to match the body output negatively. The check fails if any single expression matches. HTTP response bodies of up to 100 MiB are supported.
+
+
+  **Example**:
+
+  This monitor passes if the response body contains neither 'bar' nor 'Bar', and fails if either string is present:
+
+ ```yaml
+ check.response:
+ status: [200, 201]
+ body:
+ negative:
+ - bar
+ - Bar
+ ```
+
+
+ **Example**:
+
+  This monitor passes only when the body contains 'foo' or 'Foo' AND does not contain 'bar' or 'Bar':
+
+ ```yaml
+ check.response:
+ status: [200, 201]
+ body:
+ positive:
+ - foo
+ - Foo
+ negative:
+ - bar
+ - Bar
+ ```
+
+
+ **`json`**
+
+ A list of expressions executed against the body when parsed as JSON.
+ Body sizes must be less than or equal to 100 MiB.
+
+
+ **`description`**
+
+ A description of the check.
+
+ **`expression`**
+
+ The following configuration shows how to check the response using
+ [gval](https://github.com/PaesslerAG/gval/blob/master/README.md) expressions
+ when the body contains JSON:
+
+ **Example**:
+
+ ```yaml
+ check.response:
+ status: [200]
+ json:
+ - description: check status
+ expression: 'foo.bar == "myValue"'
+ ```
+
+
+
+
+
+
+
+
+
diff --git a/docs/en/serverless/transclusion/synthetics/reference/lightweight-config/icmp.mdx b/docs/en/serverless/transclusion/synthetics/reference/lightweight-config/icmp.mdx
new file mode 100644
index 0000000000..6f44c76824
--- /dev/null
+++ b/docs/en/serverless/transclusion/synthetics/reference/lightweight-config/icmp.mdx
@@ -0,0 +1,44 @@
+
+
+
+
+ **`hosts`**
+ (string)
+
+
+ **Required**. The host to ping.
+
+ **Example**:
+
+ ```yaml
+ hosts: "myhost"
+ ```
+
+
+
+
+ **`wait`**
+ (duration)
+
+
+ The duration to wait before emitting another ICMP Echo Request if no response is received.
+
+ **Default**: `1s`
+
+ **Example**:
+
+ ```yaml
+ wait: 1m
+ ```
+
+
+
diff --git a/docs/en/serverless/transclusion/synthetics/reference/lightweight-config/tcp.mdx b/docs/en/serverless/transclusion/synthetics/reference/lightweight-config/tcp.mdx
new file mode 100644
index 0000000000..894ff7f6f3
--- /dev/null
+++ b/docs/en/serverless/transclusion/synthetics/reference/lightweight-config/tcp.mdx
@@ -0,0 +1,109 @@
+
+
+
+
+ **`hosts`**
+ (string)
+
+
+ **Required**. The host to ping. The value can be:
+
+ * **A hostname and port, such as `localhost:12345`.**
+ Synthetics connects to the port on the specified host. If the monitor is [configured to use SSL](((heartbeat-ref))/configuration-ssl.html), Synthetics establishes an SSL/TLS-based connection. Otherwise, it establishes a TCP connection.
+  * **A full URL using the syntax `scheme://host:[port]`**, where:
+ * `scheme` is one of `tcp`, `plain`, `ssl` or `tls`. If `tcp` or `plain` is specified, Synthetics establishes a TCP connection even if the monitor is configured to use SSL. If `tls` or `ssl` is specified, Synthetics establishes an SSL connection. However, if the monitor is not configured to use SSL, the system defaults are used (currently not supported on Windows).
+ * `host` is the hostname.
+ * `port` is the port number.
+
+ **Examples**:
+
+ ```yaml
+ hosts: "localhost:8000"
+ ```
+
+ ```yaml
+ hosts: "tcp://localhost:8000"
+ ```
+
+
+
+
+ **`check`**
+
+
+ An optional payload string to send to the remote host and the expected answer. If no payload is specified, the endpoint is assumed to be available if the connection attempt was successful. If `send` is specified without `receive`, any response is accepted as OK. If `receive` is specified without `send`, no payload is sent, but the client expects to receive a payload in the form of a "hello message" or "banner" on connect.
+
+ **Example**:
+
+ ```yaml
+ check.send: 'Hello World'
+ check.receive: 'Hello World'
+ ```
+
+ ```yaml
+ check:
+ send: 'Hello World'
+ receive: 'Hello World'
+ ```
+
+
+
+
+ **`proxy_url`**
+
+
+  The URL of the SOCKS5 proxy to use when connecting to the server. The value must be a URL with a scheme of `socks5://`.
+
+ If the SOCKS5 proxy server requires client authentication, then a username and password can be embedded in the URL.
+
+ When using a proxy, hostnames are resolved on the proxy server instead of on the client. You can change this behavior by setting the `proxy_use_local_resolver` option.
+
+ **Examples**:
+
+ A proxy URL that requires client authentication:
+
+ ```yaml
+ proxy_url: socks5://user:password@socks5-proxy:2233
+ ```
+
+
+
+
+ **`proxy_use_local_resolver`**
+ (boolean)
+
+
+  A Boolean value that determines whether hostnames are resolved locally instead of on the proxy server. A value of `false` means that name resolution occurs on the proxy server.
+
+ **Default**: `false`
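+
+  **Example**:
+
+  For example, to resolve hostnames on the client instead of on the proxy:
+
+  ```yaml
+  # Resolve hostnames locally rather than on the SOCKS5 proxy
+  proxy_use_local_resolver: true
+  ```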
+
+
+
+
+ **`ssl`**
+ ([SSL](((heartbeat-ref))/configuration-ssl.html))
+
+
+ The TLS/SSL connection settings. If the monitor is [configured to use SSL](((heartbeat-ref))/configuration-ssl.html), it will attempt an SSL handshake. If `check` is not configured, the monitor will only check to see if it can establish an SSL/TLS connection. This check can fail either at TCP level or during certificate validation.
+
+ **Example**:
+
+ ```yaml
+ ssl:
+ certificate_authorities: ['/etc/ca.crt']
+ supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"]
+ ```
+
+ Also see [Configure SSL](((heartbeat-ref))/configuration-ssl.html) for a full description of the `ssl` options.
+
+
+
diff --git a/docs/en/serverless/transclusion/synthetics/tab-widgets/manage-monitors-delete-monitor-content/project.mdx b/docs/en/serverless/transclusion/synthetics/tab-widgets/manage-monitors-delete-monitor-content/project.mdx
new file mode 100644
index 0000000000..5863bf056c
--- /dev/null
+++ b/docs/en/serverless/transclusion/synthetics/tab-widgets/manage-monitors-delete-monitor-content/project.mdx
@@ -0,0 +1,10 @@
+
+If you set up the monitor using a Synthetics project,
+you'll delete the monitor from the Synthetics project and push changes.
+
+For lightweight monitors, delete the monitor from the YAML file.
+
+For browser monitors, delete the full journey from the JavaScript or TypeScript file.
+
+Then, run the `push` command.
+The monitor associated with that journey will then be deleted from your Observability project.
diff --git a/docs/en/serverless/transclusion/synthetics/tab-widgets/manage-monitors-delete-monitor-content/ui.mdx b/docs/en/serverless/transclusion/synthetics/tab-widgets/manage-monitors-delete-monitor-content/ui.mdx
new file mode 100644
index 0000000000..242f733cfa
--- /dev/null
+++ b/docs/en/serverless/transclusion/synthetics/tab-widgets/manage-monitors-delete-monitor-content/ui.mdx
@@ -0,0 +1,9 @@
+
+
+
+If you set up the monitor using the Synthetics UI,
+you can delete a lightweight or browser monitor in the UI:
+
+1. In your Observability project, go to **Synthetics** → **Management**.
+1. Click the trash can icon next to the monitor you want to delete.
+
diff --git a/docs/en/serverless/transclusion/synthetics/tab-widgets/manage-monitors-delete-monitor-widget.mdx b/docs/en/serverless/transclusion/synthetics/tab-widgets/manage-monitors-delete-monitor-widget.mdx
new file mode 100644
index 0000000000..93c622345d
--- /dev/null
+++ b/docs/en/serverless/transclusion/synthetics/tab-widgets/manage-monitors-delete-monitor-widget.mdx
@@ -0,0 +1,12 @@
+
+import ManageMonitorsDeleteMonitorContentProject from './manage-monitors-delete-monitor-content/project.mdx'
+import ManageMonitorsDeleteMonitorContentUi from './manage-monitors-delete-monitor-content/ui.mdx'
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/transclusion/synthetics/tab-widgets/manage-monitors-update-monitor-content/project.mdx b/docs/en/serverless/transclusion/synthetics/tab-widgets/manage-monitors-update-monitor-content/project.mdx
new file mode 100644
index 0000000000..9566fa08f3
--- /dev/null
+++ b/docs/en/serverless/transclusion/synthetics/tab-widgets/manage-monitors-update-monitor-content/project.mdx
@@ -0,0 +1,26 @@
+
+
+
+If you set up the monitor using a Synthetics project,
+you'll update the monitor in the Synthetics project and then `push` changes to your Observability project.
+
+For lightweight monitors, make changes to the YAML file.
+
+For browser monitors, you can update the configuration of one or more monitors:
+
+* To update the configuration of an individual monitor, edit the journey directly in
+ the JavaScript or TypeScript files, specifically the options in `monitor.use`.
+* To update the configuration of _all_ monitors in a Synthetics project, edit the
+ global synthetics configuration file.
+
+To update the journey that a browser monitor runs, edit the journey code directly and
+test the updated journey locally to make sure it's valid.
+
+After making changes to the monitors, run the `push` command
+to replace the existing monitors with new monitors using the updated
+configuration or journey code.
+
+
+Updates are linked to a monitor's `id`. To update a monitor, you must keep its `id` the same.
+
+
diff --git a/docs/en/serverless/transclusion/synthetics/tab-widgets/manage-monitors-update-monitor-content/ui.mdx b/docs/en/serverless/transclusion/synthetics/tab-widgets/manage-monitors-update-monitor-content/ui.mdx
new file mode 100644
index 0000000000..74c4c97bd4
--- /dev/null
+++ b/docs/en/serverless/transclusion/synthetics/tab-widgets/manage-monitors-update-monitor-content/ui.mdx
@@ -0,0 +1,14 @@
+
+
+
+If you set up the monitor using the UI,
+you can update the monitor configuration of both lightweight and browser monitors
+in the UI:
+
+1. In your Observability project, go to **Synthetics** → **Management**.
+1. Click the pencil icon next to the monitor you want to edit.
+1. Update the _Monitor settings_ as needed.
+ 1. To update the journey used in a browser monitor, edit _Inline script_.
+ 1. Make sure to click **Run test** to validate the new journey before updating the monitor.
+1. Click **Update monitor**.
+
diff --git a/docs/en/serverless/transclusion/synthetics/tab-widgets/manage-monitors-update-monitor-widget.mdx b/docs/en/serverless/transclusion/synthetics/tab-widgets/manage-monitors-update-monitor-widget.mdx
new file mode 100644
index 0000000000..d88c40700e
--- /dev/null
+++ b/docs/en/serverless/transclusion/synthetics/tab-widgets/manage-monitors-update-monitor-widget.mdx
@@ -0,0 +1,12 @@
+
+import ManageMonitorsUpdateMonitorContentProject from './manage-monitors-update-monitor-content/project.mdx'
+import ManageMonitorsUpdateMonitorContentUi from './manage-monitors-update-monitor-content/ui.mdx'
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/en/serverless/what-is-observability-serverless.mdx b/docs/en/serverless/what-is-observability-serverless.mdx
new file mode 100644
index 0000000000..0cb37c8a04
--- /dev/null
+++ b/docs/en/serverless/what-is-observability-serverless.mdx
@@ -0,0 +1,87 @@
+---
+id: serverlessObservabilityWhatIsObservabilityServerless
+slug: /serverless/observability/what-is-observability-serverless
+title: What is Observability serverless?
+# description: Description to be written
+tags: [ 'serverless', 'observability', 'overview' ]
+layout: landing
+---
+
+
+
+
+
+While in technical preview, Elastic Observability serverless projects should not be used for production workloads.
+
+
+
+
+