diff --git a/.github/workflows/auto-comment.yml b/.github/workflows/auto-comment.yml index f403ec4795b..5a53ab41129 100644 --- a/.github/workflows/auto-comment.yml +++ b/.github/workflows/auto-comment.yml @@ -12,7 +12,7 @@ jobs: Thanks for your pull request! Your PR is in a queue, and a writer will take a look soon. We generally publish small edits within one business day, and larger edits within three days. - Gatsby Cloud will automatically generate a preview of your request, and will comment with a link when the preview is ready (usually 20 to 30 minutes). + We will automatically generate a preview of your request, and will comment with a link when the preview is ready (usually 10 to 20 minutes). If you add any more commits, you can comment `netlify build` on this PR to update the preview. issuesOpened: | Hi @{{ author }} 👋 diff --git a/.github/workflows/build-notification.yml b/.github/workflows/build-notification.yml index da862f43e29..7ba61dccf2d 100644 --- a/.github/workflows/build-notification.yml +++ b/.github/workflows/build-notification.yml @@ -15,7 +15,7 @@ on: options: - building - ready - - failed + - error deployUrl: description: URL of live branch deploy required: false @@ -60,7 +60,7 @@ jobs: END ) ;; - "failed") + "error") comment_body=$(cat <<-END ### Deploy Preview failed! 
@@ -92,8 +92,8 @@ jobs: -f context='netlify/build' \ /repos/${{ env.repo }}/statuses/${{ inputs.sha }} - - name: update PR check - failed - if: ${{ inputs.buildStatus == 'failed' }} + - name: update PR check - error + if: ${{ inputs.buildStatus == 'error' }} env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | diff --git a/.github/workflows/manual-deploy-comment.yml b/.github/workflows/manual-deploy-comment.yml index 8d60acb40b8..daa816415cd 100644 --- a/.github/workflows/manual-deploy-comment.yml +++ b/.github/workflows/manual-deploy-comment.yml @@ -3,10 +3,20 @@ name: Netlify build manual deploy comment on: issue_comment: types: [created] + pull_request: + types: [opened] jobs: deploy-preview: + # when a contributor comments 'netlify build', + # but only on pull requests, not issues. + # or if a contributor opens a PR for the first time. + if: | + (github.event.comment.body == 'netlify build' + && github.event.issue.pull_request) + || github.event.action == 'opened' runs-on: ubuntu-latest + steps: # we use `jq` to parse the GH API response - name: setup jq @@ -15,13 +25,8 @@ jobs: - name: send request to Netlify build hook env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - # when a contributor comments 'netlify build', - # but only on pull requests, not issues - if: | - contains(github.event.comment.body, 'netlify build') - && ${{ github.event.issue.pull_request }} run: | - gh_api_url=$(echo ${{ github.event.issue.pull_request.url }} | sed 's/https:\/\/api.github.com//') + gh_api_url=$(echo ${{ github.event.issue.pull_request.url || github.event.pull_request.url }} | sed 's/https:\/\/api.github.com//') gh_api_response=$(gh api $gh_api_url) branch_name=$(echo $gh_api_response | jq -r .head.ref) sha=$(echo $gh_api_response | jq -r .head.sha) diff --git a/.gitignore b/.gitignore index 30b2b91c0cb..e2cd323bb5d 100644 --- a/.gitignore +++ b/.gitignore @@ -88,3 +88,6 @@ yarn-error.log .devcontainer src/images/infrastructure_screenshot_full_sonarqube-dashboard.webp + +# Local 
Netlify folder +.netlify diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b39416791f7..62ab4097cd4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -105,8 +105,9 @@ If the workflows are enabled and running, you will want to disable them. You can 2. Make your changes. 3. Test your changes! Review the project's [READ ME](README.md) for instructions on how to build and run tests locally. 4. Submit a `Pull Request` to this project with your changes. -5. If/when your `PR` is accepted, the automation in this project will build the site and deploy a new version of the code to `docs.newrelic.com`. -6. And you are done! +5. A preview will start building automatically when a PR is opened. To update the preview after further commits, leave a comment on the PR that says `netlify build`. +6. If/when your `PR` is accepted, the automation in this project will build the site and deploy a new version of the code to `docs.newrelic.com`. +7. And you are done! ### Submitting a PR from a cloned repo @@ -116,8 +117,9 @@ If the workflows are enabled and running, you will want to disable them. You can 3. Make your changes. 4. Test your changes! Review the project's [READ ME](README.md) for instructions on how to build and run tests locally. 5. Submit a `Pull Request` to this project with your changes. -6. If/when your `PR` is accepted, the automation in this project will build the site and deploy a new version of the code to `docs.newrelic.com`. -7. And you are done! +6. A preview will start building automatically when a PR is opened. To update the preview after further commits, leave a comment on the PR that says `netlify build`. +7. If/when your `PR` is accepted, the automation in this project will build the site and deploy a new version of the code to `docs.newrelic.com`. +8. And you are done! 
### Using the `develop` branch diff --git a/env.js b/env.js index 0d8cb8a157d..9b99882f440 100644 --- a/env.js +++ b/env.js @@ -10,6 +10,12 @@ const assetPrefix = () => { if (process.env.BUILD_LANG === 'kr') { return 'https://docs-website-kr.netlify.app'; } + if (process.env.BUILD_LANG === 'es') { + return 'https://docs-website-es.netlify.app'; + } + if (process.env.BUILD_LANG === 'pt') { + return 'https://docs-website-pt.netlify.app'; + } return ''; }; diff --git a/gatsby-config.js b/gatsby-config.js index 868b6fbed1a..cd60471bb0b 100644 --- a/gatsby-config.js +++ b/gatsby-config.js @@ -374,7 +374,7 @@ module.exports = { }, }, layout: { - contentPadding: '1.5rem', + contentPadding: '5rem', maxWidth: '1600px', component: require.resolve('./src/layouts'), mobileBreakpoint: '760px', @@ -382,7 +382,7 @@ module.exports = { }, i18n: { translationsPath: `${__dirname}/src/i18n/translations`, - additionalLocales: ['jp', 'kr'], + additionalLocales: LOCALES, }, prism: { languages: [ diff --git a/gatsby-node.js b/gatsby-node.js index aa34b3d8b53..e56b132c979 100644 --- a/gatsby-node.js +++ b/gatsby-node.js @@ -244,16 +244,10 @@ exports.createSchemaCustomization = ( translationType: String eolDate: String downloadLink: String - signupBanner: SignupBanner features: [String] bugs: [String] security: [String] } - type SignupBanner { - cta: String - url: String - text: String - } `; @@ -366,20 +360,6 @@ exports.createResolvers = ({ createResolvers }) => { hasOwnProperty(source, 'security') ? source.security : null, }, }, - SignupBanner: { - cta: { - resolve: (source) => - hasOwnProperty(source, 'cta') ? source.cta : null, - }, - url: { - resolve: (source) => - hasOwnProperty(source, 'url') ? source.url : null, - }, - text: { - resolve: (source) => - hasOwnProperty(source, 'text') ? 
source.text : null, - }, - }, }); }; diff --git a/netlify.toml b/netlify.toml index 7c2c04e683a..1295d68fcfe 100644 --- a/netlify.toml +++ b/netlify.toml @@ -4,6 +4,10 @@ package = "@netlify/plugin-gatsby" [functions] included_files = ["!.cache/data/datastore/data.mdb","!.cache/query-engine"] +[[edge_functions]] + path = "/*" + function = "osano-country" + [[headers]] for = "/*" diff --git a/netlify/edge-functions/osano-country.js b/netlify/edge-functions/osano-country.js new file mode 100644 index 00000000000..a09fbe12082 --- /dev/null +++ b/netlify/edge-functions/osano-country.js @@ -0,0 +1,53 @@ +import { HTMLRewriter } from 'https://ghuc.cc/worker-tools/html-rewriter/index.ts'; + +export default async (request, context) => { + const response = await context.next(); + const hasGdpr = new Set([ + 'AT', + 'BE', + 'BG', + 'HR', + 'CY', + 'CZ', + 'DK', + 'EE', + 'FI', + 'FR', + 'DE', + 'GR', + 'HU', + 'IE', + 'IT', + 'LV', + 'LT', + 'LU', + 'MT', + 'NL', + 'PL', + 'PT', + 'RO', + 'SK', + 'SI', + 'ES', + 'SE', + ]).has(context.geo.country.code); + + if (hasGdpr) { + return new HTMLRewriter() + .on('script', { + element(element) { + const scriptSrc = element.getAttribute('src'); + if ( + typeof scriptSrc === 'string' && + scriptSrc.startsWith('https://cmp.osano.com/') + ) { + element.setAttribute( + 'src', + scriptSrc.replace(/variant=one/gi, 'variant=two') + ); + } + }, + }) + .transform(response); + } +}; diff --git a/package.json b/package.json index 0ecea10fc9f..bb428e2feae 100644 --- a/package.json +++ b/package.json @@ -10,7 +10,7 @@ "@emotion/styled": "^11.3.0", "@mdx-js/mdx": "2.0.0-next.8", "@mdx-js/react": "2.0.0-next.8", - "@newrelic/gatsby-theme-newrelic": "9.3.1", + "@newrelic/gatsby-theme-newrelic": "9.4.4", "@splitsoftware/splitio-react": "^1.2.4", "ansi-colors": "^4.1.3", "cockatiel": "^3.0.0-beta.0", diff --git a/src/@newrelic/gatsby-theme-newrelic/components/Logo.js b/src/@newrelic/gatsby-theme-newrelic/components/Logo.js index 
3eb46c406eb..94952ece0f4 100644 --- a/src/@newrelic/gatsby-theme-newrelic/components/Logo.js +++ b/src/@newrelic/gatsby-theme-newrelic/components/Logo.js @@ -12,9 +12,6 @@ const Logo = ({ className, width }) => ( .text-color { fill: #e7f6f6; } - .hexagon-color { - fill: var(--system-text-primary-light); - } .brand-color { fill: #1ce783; } @@ -41,10 +38,6 @@ const Logo = ({ className, width }) => ( d="M489.06,112.81a11.7,11.7,0,0,0,10.72,6.57c4.08,0,7.7-1.75,7.7-5.56s-3.35-4.36-9.11-5.56-11.79-2.55-11.79-9.72C486.58,92.31,492,88,499.44,88c6.3,0,11.46,3.08,13.53,7.23l-4.28,3.42a10.27,10.27,0,0,0-9.52-5.69c-4,0-6.69,2-6.69,5.15,0,3.29,3.08,3.89,8,5,6,1.41,12.93,2.61,12.93,10.25,0,6.7-6.1,11.12-13.6,11.12-6.37,0-12.73-2.75-15.41-8.17Z" transform="translate(-251.43 -50.58)" /> - 0 && - css` - span { - font-weight: 600; - } - `} - ${depth > 1 && + border-left: ${parent == null + ? 'none' + : 'solid var(--system-background-hover-dark) 2px'}; + + ${isExpanded && + depth === 0 && css` - border-left: solid rgba(231, 246, 246, 0.1) 2px; - span { - font-weight: 500; - } + span, + svg { + color: white; + opacity: 1; `} - a > div > span { - font-weight: 400; - } ${mobileBreakpoint && css` @@ -138,7 +130,9 @@ const NavItem = ({ padding-left: ${root?.icon ? 
'calc(var(--icon-size) + var(--icon-spacing))' : 'var(--nav-link-padding)'}; - + &:hover { + background: var(--system-background-hover-dark); + } ${mobileBreakpoint && css` @media screen and (max-width: ${mobileBreakpoint}) { @@ -149,7 +143,10 @@ const NavItem = ({ `} ${isCurrentPage && css` - background: #0d374a; + background: var(--system-background-hover-dark); + span { + font-weight: 600; + } `} `} > diff --git a/src/@newrelic/gatsby-theme-newrelic/components/NavLink.js b/src/@newrelic/gatsby-theme-newrelic/components/NavLink.js index 3f770c8ebff..cc69d4138c5 100644 --- a/src/@newrelic/gatsby-theme-newrelic/components/NavLink.js +++ b/src/@newrelic/gatsby-theme-newrelic/components/NavLink.js @@ -45,7 +45,6 @@ const NavLink = ({ margin: 0 calc(var(--nav-link-padding) * -1); font-size: 0.875rem; text-decoration: none; - border-radius: 0.25rem; &:hover { color: var(--secondary-text-color); diff --git a/src/components/DocPageBanner.js b/src/components/DocPageBanner.js deleted file mode 100644 index cbaa9586824..00000000000 --- a/src/components/DocPageBanner.js +++ /dev/null @@ -1,109 +0,0 @@ -// VSU -import React from 'react'; -import { css } from '@emotion/react'; -import { - Button, - Link, - Icon, - useInstrumentedHandler, -} from '@newrelic/gatsby-theme-newrelic'; -import lines from './bannerLines.svg'; - -const DocPageBanner = ({ text, cta, url, height, onClose }) => { - const ctaWithDefault = cta ?? 'Start now'; - const urlWithDefault = url ?? 'https://newrelic.com/signup'; - - const handleBannerDismiss = useInstrumentedHandler( - () => { - onClose(); - }, - { - eventName: 'dismiss', - category: 'DocBanner', - } - ); - return ( -
-
-

{text}

- -
- -
- ); -}; - -export default DocPageBanner; diff --git a/src/components/InlinePopover/layouts/TwoButton.js b/src/components/InlinePopover/layouts/TwoButton.js index 304609b6cd8..7405ab1e1e4 100644 --- a/src/components/InlinePopover/layouts/TwoButton.js +++ b/src/components/InlinePopover/layouts/TwoButton.js @@ -90,7 +90,7 @@ const Container = styled.div` const Heading = styled.h2` color: currentColor; - font-size: 0.75rem; + font-size: 1rem; font-weight: 500; grid-column: 1 / 3; justify-self: start; @@ -123,7 +123,8 @@ const SecondaryButton = styled(Button)` `; const Text = styled.p` - font-size: 0.75rem; + font-size: 1rem; + font-weight: 300; grid-column: 1 / 3; margin: 0 0 8px; `; diff --git a/src/components/Layout/Sidebar.js b/src/components/Layout/Sidebar.js index 5d66e9f8765..a041663b607 100644 --- a/src/components/Layout/Sidebar.js +++ b/src/components/Layout/Sidebar.js @@ -27,7 +27,7 @@ const Sidebar = ({ children, className }) => { bottom: 0; left: 0; right: 0; - padding: var(--site-content-padding); + padding: 1.5rem; overflow: auto; `} > diff --git a/src/components/NavFooter.js b/src/components/NavFooter.js deleted file mode 100644 index ed347a9d518..00000000000 --- a/src/components/NavFooter.js +++ /dev/null @@ -1,115 +0,0 @@ -import React from 'react'; -import { css } from '@emotion/react'; -import { useLocation } from '@reach/router'; -import PropTypes from 'prop-types'; -import { - ExternalLink, - Icon, - useTranslation, -} from '@newrelic/gatsby-theme-newrelic'; -import ctaJson from '../data/nav-footer-cta'; - -const NavFooter = ({ className }) => { - const { t } = useTranslation(); - const location = useLocation(); - const currentPage = location.pathname; - let ctaContent = ctaJson.default; - const allCTAs = Object.keys(ctaJson); - - for (const product of allCTAs) { - if ( - currentPage.includes(ctaJson[product].directory) && - product !== 'default' - ) { - ctaContent = ctaJson[product]; - } - } - - return ( -
- - - {t(`navFooter.${ctaContent.i18nKey}`)} - - -
- ); -}; - -NavFooter.propTypes = { - className: PropTypes.string, -}; - -export default NavFooter; diff --git a/src/components/Navigation.js b/src/components/Navigation.js index 02722b5b0b8..b438aa487aa 100644 --- a/src/components/Navigation.js +++ b/src/components/Navigation.js @@ -31,7 +31,6 @@ const Navigation = ({ nav, className }) => { height: 100%; overflow: auto; margin: 16px 0; - padding-bottom: 2rem; -ms-overflow-style: none; scrollbar-width: none; &::-webkit-scrollbar { @@ -39,7 +38,8 @@ const Navigation = ({ nav, className }) => { } span, svg { - color: #afe2e3; + color: var(--system-text-primary-dark); + opacity: 0.8; } `} id="nav" @@ -55,9 +55,11 @@ const Navigation = ({ nav, className }) => {

diff --git a/src/components/bannerLines.svg b/src/components/bannerLines.svg deleted file mode 100644 index b046dd9989c..00000000000 --- a/src/components/bannerLines.svg +++ /dev/null @@ -1,66 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/src/content/docs/accounts/accounts-billing/account-setup/troubleshoot-new-relics-password-email-address-login-problems.mdx b/src/content/docs/accounts/accounts-billing/account-setup/troubleshoot-new-relics-password-email-address-login-problems.mdx index 812b9ce9a49..c0ade8bf86f 100644 --- a/src/content/docs/accounts/accounts-billing/account-setup/troubleshoot-new-relics-password-email-address-login-problems.mdx +++ b/src/content/docs/accounts/accounts-billing/account-setup/troubleshoot-new-relics-password-email-address-login-problems.mdx @@ -11,8 +11,6 @@ redirects: - /docs/accounts-partnerships/accounts/account-setup/troubleshoot-password-email-address-login-problems - /docs/accounts-partnerships/install-new-relic/account-setup/troubleshoot-password-email-address-login-problems - /docs/accounts/install-new-relic/account-setup/troubleshoot-password-email-address-login-problems -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. freshnessValidatedDate: never --- diff --git a/src/content/docs/accounts/accounts-billing/new-relic-one-user-management/user-management-concepts.mdx b/src/content/docs/accounts/accounts-billing/new-relic-one-user-management/user-management-concepts.mdx index 80ee1102cc2..11110853664 100644 --- a/src/content/docs/accounts/accounts-billing/new-relic-one-user-management/user-management-concepts.mdx +++ b/src/content/docs/accounts/accounts-billing/new-relic-one-user-management/user-management-concepts.mdx @@ -177,20 +177,6 @@ Here's a table with our standard roles. 
To better understand these roles, go to - - - **Manage v1 users** - - - - For New Relic organizations that existed before July 30 2020 and have users on our [original user model](/docs/accounts/original-accounts-billing/original-users-roles/overview-user-models), this role lets you manage those "v1" users. This is used primarily for overseeing a [user migration process](/docs/accounts/original-accounts-billing/original-users-roles/user-migration/). - - - - Required: full platform. - - - diff --git a/src/content/docs/accounts/accounts-billing/new-relic-one-user-management/user-permissions.mdx b/src/content/docs/accounts/accounts-billing/new-relic-one-user-management/user-permissions.mdx index 117b120a020..f4e8068ffcd 100644 --- a/src/content/docs/accounts/accounts-billing/new-relic-one-user-management/user-permissions.mdx +++ b/src/content/docs/accounts/accounts-billing/new-relic-one-user-management/user-permissions.mdx @@ -44,15 +44,11 @@ Our pre-built roles have various groupings of permissions. How our pre-built rol ## Permission definitions [#permission-definitions] -Here's a screenshot of the permissions available in the permissions UI. These are only a subset of everything you can do in New Relic and represent the specific permissions we believe are likely to be valuable for creating custom roles. +You can go to the UI to view the permissions for each of our pre-built roles. In the lower-left corner of the [UI](https://one.newrelic.com), click on your name to open the user menu, and then go to **Administration > Access management > Roles**. Lists of permissions are available for the following roles: -New Relic user permissions UI screenshot - -Note that permissions can change. The permissions in this doc were last updated April 3, 2023. +* All product admin +* Standard user +* Read only To learn more about specific permissions, select a category below, or try searching this doc for a specific term you're looking for. 
diff --git a/src/content/docs/accounts/accounts-billing/new-relic-one-user-management/user-type.mdx b/src/content/docs/accounts/accounts-billing/new-relic-one-user-management/user-type.mdx index 1358382ba74..8985238eac6 100644 --- a/src/content/docs/accounts/accounts-billing/new-relic-one-user-management/user-type.mdx +++ b/src/content/docs/accounts/accounts-billing/new-relic-one-user-management/user-type.mdx @@ -12,8 +12,6 @@ redirects: - /docs/accounts/users-roles/user-type - /docs/accounts/accounts-billing/new-relic-one-pricing-billing/core-users-release - /docs/accounts/pricing-billing/new-relic-one-pricing/new-relic-one-pricing-details -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. freshnessValidatedDate: never --- diff --git a/src/content/docs/agile-handbook/appendices/ticket-best-practices.mdx b/src/content/docs/agile-handbook/appendices/ticket-best-practices.mdx index 9c8a5617b36..72e3b03955d 100644 --- a/src/content/docs/agile-handbook/appendices/ticket-best-practices.mdx +++ b/src/content/docs/agile-handbook/appendices/ticket-best-practices.mdx @@ -3,8 +3,6 @@ title: "Ticket best practices: How to write a sprint-ready Jira" template: basicDoc topics: - Docs agile handbook -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. freshnessValidatedDate: never --- diff --git a/src/content/docs/ai-monitoring/compatibility-requirements-ai-monitoring.mdx b/src/content/docs/ai-monitoring/compatibility-requirements-ai-monitoring.mdx new file mode 100644 index 00000000000..65efe131fc1 --- /dev/null +++ b/src/content/docs/ai-monitoring/compatibility-requirements-ai-monitoring.mdx @@ -0,0 +1,76 @@ +--- +title: Compatibility and requirements for AI monitoring +metaDescription: 'Compatibility and requirements for AI monitoring' +freshnessValidatedDate: never +--- + +AI monitoring allows agents to recognize and capture AI data. 
AI monitoring has different library compatibility requirements depending on what language you used for your AI-powered app. + +When you disable distributed tracing or enable high security mode, the agent will not capture AI data. + + +You should not enable AI monitoring if you're a [FedRAMP customer](/docs/security/security-privacy/compliance/certificates-standards-regulations/fedramp-moderate), because AI and AI-based technologies are not currently FedRAMP authorized. + + +## Compatible AI libraries [#compatibility] + +AI monitoring is compatible with these agent versions and AI libraries: + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Agent version + + Supported libraries +
+ [Go version 3.31.0 and above](/docs/apm/agents/go-agent/get-started/go-agent-compatibility-requirements/#digital-intelligence-platform) + + * [Go OpenAI library](https://github.com/sashabaranov/go-openai) versions 1.19.4 and above + * [AWS SDK for Go v2](https://github.com/aws/aws-sdk-go-v2) versions 1.6.0 and above +
+ [Node.js version 11.13.0 and above](/docs/apm/agents/nodejs-agent/getting-started/compatibility-requirements-nodejs-agent/#digital-intelligence-platform) + + * [OpenAI Node.js API library](https://www.npmjs.com/package/openai/v/4.0.0) versions 4.0.0 and above. If your model uses streaming, the Node.js agent supports versions 4.12.2 and above + * [AWS SDK for JavaScript BedrockRuntime Client](https://www.npmjs.com/package/@aws-sdk/client-bedrock-runtime) versions 3.474.0 and above + * [LangChain.js](https://www.npmjs.com/package/langchain/v/0.1.17) versions 0.1.17 and above +
+ [Python version 9.8.0 and above](/docs/apm/agents/python-agent/getting-started/compatibility-requirements-python-agent/#digital-intelligence-platform) + + * [OpenAI](https://pypi.org/project/openai/) library versions 0.28.0 and above. + + * [Boto3 AWS SDK for Python](https://pypi.org/project/boto3/) versions 1.28.57 and above. + * [LangChain](https://pypi.org/project/langchain/) versions 0.1.0 and above. +
+ [Ruby version 9.8.0 and above](/docs/apm/agents/ruby-agent/getting-started/ruby-agent-requirements-supported-frameworks/#digital-intelligence-platform) + + * [OpenAI gem](https://github.com/alexrudall/ruby-openai) version 3.4.0 and above +
+ +## What's next? [#whats-next] + +* You can get started by [installing AI monitoring](/install/ai-monitoring). +* Explore our AI monitoring UI to see how we can help you [improve your AI-powered app](/docs/ai-monitoring/view-ai-data). +* Learn how to maintain data compliancy by [setting up drop filters](/docs/ai-monitoring/drop-sensitive-data). \ No newline at end of file diff --git a/src/content/docs/ai-monitoring/customize-agent-ai-monitoring.mdx b/src/content/docs/ai-monitoring/customize-agent-ai-monitoring.mdx new file mode 100644 index 00000000000..56d66a896fd --- /dev/null +++ b/src/content/docs/ai-monitoring/customize-agent-ai-monitoring.mdx @@ -0,0 +1,138 @@ +--- +title: 'Customize the agent for AI monitoring' +metaDescription: 'You can apply certain configurations to your APM agents to change how your AI data appears in New Relic.' +freshnessValidatedDate: never +--- + +Once you [install AI monitoring](/install/ai-monitoring), you can configure the default behavior of the agent or update your app to collect different kinds of data. 
+ +## Configure the agent [#configure-agents] + +Update default agent behavior for AI monitoring at these agent configuration docs: + + + + * [`ai_monitoring.enabled`](/docs/apm/agents/go-agent/configuration/go-agent-configuration/#ai-monitoring-enabled) + * [`ai_monitoring.streaming.enabled`](/docs/apm/agents/go-agent/configuration/go-agent-configuration/#ai-monitoring-streaming) + * [`ai_monitoring.record_content.enabled`](/docs/apm/agents/go-agent/configuration/go-agent-configuration/#ai-monitoring-record-content) + * [`ConfigCustomInsightsEventsMaxSamplesStored`](/docs/apm/agents/go-agent/configuration/go-agent-configuration/#env-var-table) + + + * [`ai_monitoring.enabled`](/docs/apm/agents/nodejs-agent/installation-configuration/nodejs-agent-configuration/#ai-monitoring-enabled) + * [`ai_monitoring.streaming.enabled`](/docs/apm/agents/nodejs-agent/installation-configuration/nodejs-agent-configuration/#ai-monitoring-streaming) + * [`ai_monitoring.record_content.enabled`](/docs/apm/agents/nodejs-agent/installation-configuration/nodejs-agent-configuration/#ai-monitoring-record-content) + * [`custom_insights_events.max_samples_stored`](/docs/apm/agents/nodejs-agent/installation-configuration/nodejs-agent-configuration/#custom_events_max_samples_stored) + * [`span_events.max_samples_stored`](/docs/apm/agents/nodejs-agent/installation-configuration/nodejs-agent-configuration/#span-events-max-samples-stored) + + + * [`ai_monitoring.enabled`](/docs/apm/agents/python-agent/configuration/python-agent-configuration/#ai-monitoring-enabled) + * [`ai_monitoring.streaming.enabled`](/docs/apm/agents/python-agent/configuration/python-agent-configuration/#ai-monitoring-streaming) + * [`ai_monitoring.record_content.enabled`](/docs/apm/agents/python-agent/configuration/python-agent-configuration/#ai-monitoring-record-content) + * 
[`event_harvest_config.harvest_limits.span_event_data`](/docs/apm/agents/python-agent/configuration/python-agent-configuration/#harvest-limits-span-event-data) + * [`event_harvest_config.harvest_limits.custom_event_data`](/docs/apm/agents/python-agent/configuration/python-agent-configuration/#harvest-limits-custom-event-data) + + + * [`ai_monitoring.enabled`](/docs/apm/agents/ruby-agent/configuration/ruby-agent-configuration/#ai-monitoring-enabled) + * [`instrumentation.ruby_openai`](/docs/apm/agents/ruby-agent/configuration/ruby-agent-configuration/#ruby-openai) + * [`ai_monitoring.record_content.enabled`](/docs/apm/agents/ruby-agent/configuration/ruby-agent-configuration/#ai-monitoring-record-content) + * [`span_events.max_samples_stored`](/docs/apm/agents/ruby-agent/configuration/ruby-agent-configuration/#custom_insights_events-max_samples_stored) + * [`custom_insights_events.max_samples_stored`](/docs/apm/agents/ruby-agent/configuration/ruby-agent-configuration/#span_events-max_samples_stored) + + + +## Token count method [#enable-token] + + +If you haven't disabled `ai_monitoring.record_content.enabled`, you don't need to implement the token count callback API. + +Disabling `ai_monitoring.record_content.enabled` stops the agent from sending AI content to New Relic, but it also prevents the agent from forwarding token counts for interactions with your app. You can implement a callback in your app code to determine the token counts locally, then have that information forwarded to New Relic. 
+ +Refer to the docs below for examples to set up counting tokens locally: + + + + + Refer to the Go API docs for [`SetLLMTokenCountCallback`](https://pkg.go.dev/github.com/newrelic/go-agent/v3/newrelic#Application.SetLLMTokenCountCallback) + + + + Refer to our API docs for [`recordLlmFeedbackEvent`](https://newrelic.github.io/node-newrelic/API.html#recordLlmFeedbackEvent) + + + Refer to our API docs for [`set_llm_token_count_callback`](/docs/apm/agents/python-agent/python-agent-api/setllmtokencountcallback-python-agent-api). + + + Refer to our API docs for [`NewRelic::Agent.set_llm_token_count_callback`](/docs/apm/agents/ruby-agent/api-guides/ruby-ai-monitoring-apis). + + + +## User feedback methods [#enable-feedback] + +If end users can leave feedback on an AI response, you can forward this data into the AI monitoring response table. To do this, you'll update your app code to correlate trace IDs from AI event data using callback methods. + +There are two methods you need to implement to forward this kind of information: + + + + + Refer to the Go API docs for: + + * [`GetTraceMetadata`](https://pkg.go.dev/github.com/newrelic/go-agent/v3/newrelic#Application.getTraceMetadata) + * [`RecordLLMFeedbackEvent`](https://pkg.go.dev/github.com/newrelic/go-agent/v3/newrelic#Application.RecordLLMFeedbackEvent) + + + + + Refer to our API docs for: + + * [`setLlmTokenCountCallback`](https://newrelic.github.io/node-newrelic/API.html#setLlmTokenCountCallback) + * [`getTraceMetadata`](https://newrelic.github.io/node-newrelic/API.html#getTraceMetadata) + + + + Refer to our API docs for: + + * [`newrelic.agent.current_trace_id()`](/docs/apm/agents/python-agent/python-agent-api/currenttraceid-python-agent) + * [`record_llm_feedback_event`](/docs/apm/agents/python-agent/python-agent-api/recordllmfeedbackevent-python-agent-api) + + + + Refer to our API docs for [`NewRelic::Agent.record_llm_feedback_event`](/docs/apm/agents/ruby-agent/api-guides/ruby-ai-monitoring-apis/#user-feedback) 
+ + \ No newline at end of file diff --git a/src/content/docs/ai-monitoring/drop-sensitive-data.mdx b/src/content/docs/ai-monitoring/drop-sensitive-data.mdx new file mode 100644 index 00000000000..b7b586dc9fd --- /dev/null +++ b/src/content/docs/ai-monitoring/drop-sensitive-data.mdx @@ -0,0 +1,245 @@ +--- +title: 'Remove sensitive data with drop filters' +metaDescription: 'Drop filters prompts AI monitoring to drop attributes containing sensitive data.' +freshnessValidatedDate: never +--- + +import aiDropFilterModal from 'images/ai_screenshot-crop_drop-filter-modal.webp' + +import aiDropFilterTable from 'images/ai_screenshot-crop_drop-filter-table.webp' + +You have two options for dropping sensitive AI data before you send it to New Relic. This doc guides you through these two methods so you can have better control over the kinds of data the agent collects. + +## Disable `ai.monitoring.record_content_enabled` [#disable-event] + +When you disable `ai_monitoring.record_content.enabled`, event data containing end user prompts and AI responses won’t be sent to NRDB. You can read more about agent configurations at our [AI monitoring configuration doc](/docs/ai-monitoring/customize-agent-ai-monitoring). + +## Create drop filters [#create-filter] + + +Use caution when deciding to drop data. The data you drop is not recoverable. Before using this feature, [review your data compliance responsibilities](#responsibilities). + + +A single drop filter targets a specified attribute within one event type, but sensitive information from a single AI interaction is stored in multiple events. To drop information before it enters NRDB, you need six separate drop filters. + + When you click Create a drop filter, a side modal appears to guide you through dropping text from events and attributes. + +1. Go to **[one.newrelic.com](https://one.newrelic.com) > All capabilities > AI monitoring > Drop filters**, then click **Create drop filter**. +1. Create a filter name. 
Because one kind of data requires at least six drop filters, we recommend a naming convention that helps you track the events you're dropping data from. +1. Paste in the base NRQL query: + ```sql + SELECT FROM WHERE RLIKE + ``` +1. Referring to the table, update the `` and `` placeholders to match the attributes and events. For example: + ```sql + SELECT content FROM LlmChatCompletionMessage WHERE content RLIKE + ``` +1. Add the regex that corresponds to the kind of information you want to drop. For example, if you're targeting IPv4 addresses, the completed query should read: + ```sql + SELECT content FROM LlmChatCompletionMessage WHERE content RLIKE ^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})$ + ``` +1. Repeat the above steps to create drop filters for the remaining events and column pairs. + +## How drop filters work [#drop-rules-work] + +A drop filter evaluates data forwarded by the agent within the data ingest pipeline. A drop filter contains three parts: + +* **Events**: A stored record from an interaction within your system. +* **Attributes**, or columns: A key-value pair attached to data objects. +* **Regex**: A string of characters and operators that corresponds to kinds of information. + +### Events and attributes + +In a typical AI interaction, a prompt or request undergoes certain processes (like embedding) that are recorded as discrete events. For example, let's say a customer requests a street address on file. The model processes the prompt, which pulls additional context through various services and databases. Your AI assistant then returns with a response that contains the requested information. + + When you click Create a drop filter, a side modal appears to guide you through dropping text from events and attributes. + +A complete set of drop filters for a piece of sensitive information should include queries for the six events provided in the drop filter table. 
For each event, you need to create separate filters for attributes when there are more than one. Your drop filters correspond to event and column pairs in a given row. A few things to remember: + +* The column `content` appears in the `LlmChatCompletionMessage` event and not in the `LlmEmbedding` event. +* The column `messages` only appears in `LlmFeedbackMessage` but not in `LlmTool`. +* The exception to this rule is the attribute `input`, which appears in both `LlmEmbedding` and `LlmTool`. + +### Regex + +Since the agent's default behavior is to capture all parts of event data before sending it to New Relic, you need to direct the ingest pipeline to match sensitive information with regex. By targeting an attribute with regex, you can still capture the event itself without storing sensitive information in our databases. + +Refer to the regex below to start building your first queries: + + + + **Expression:** + ``` + (\d{10}) + ``` + + + + **Expression:** + ``` + (\d{3}[-\s\.]?\d{3}[-\s\.]?\d{3}) + ``` + + + + **Expression:** + ``` + ([a-zA-Z0-9!#$'*+?^_`{|}~.-]+(?:@|%40)(?:[a-zA-Z0-9-]+\.)+[a-zA-Z0-9-]+) + ``` + + + + **Expression:** + ``` + ([a-zA-Z]){5}([0-9]){4}([a-zA-Z]){1}? 
+ ``` + + + + **Expression:** + ``` + ([2-9]{1}[0-9]{3}\s\d{4}\s\d{4}) + ``` + + + + **Expression:** + ``` + ([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3}) + ``` + + + + **Expression:** + + ``` + (d{4}\sd{4}\sd{4}) + ``` + + + + **Expression:** + ``` + ([a-zA-Z]?[-\s]?\d{7,8}[-\s]?[a-zA-Z]) + ``` + + + + **Expression:** + ``` + (\d{3}[-\s\.]?\d{2}[-\s\.]?\d{4}) + ``` + + + + **Expression:** + ``` + ([a-zA-Z]{2}[-\s]?\d{2}[-\s]?\d{2}[-\s]?\d{2}[-\s]?[a-dA-D]) + ``` + + + + **Expression:** + ``` + \d{1,}(\s{1}\w{1,})(\s{1}?\w{1,}) + ``` + + + + **Expression:** + ``` + (^[\+]?[1]?[\W]?[(]?[0-9]{3}[)]?[-\s\.]?[0-9]{3}[-\s\.]?[0-9]{4}) + ``` + + + + **Expression:** + ``` + ([a-zA-Z]?\d?\d{5,8}) + ``` + + + + **Expression:** + ``` + ((?:\d{2})?\d\d(?:\\)?(?:\/)?\d\d(?:\\)?(?:\/)?\d{2}(?:\d{2})?) + ``` + + + + **Expression:** + ``` + ((?:(?:4\d{3})|(?:5[1-5]\d{2})|6(?:011|5[0-9]{2}))(?:-?|\040?)(?:\d{4}(?:-?|\040?)){3}|(?:3[4,7]\d{2})(?:-?|\040?)\d{6}(?:-?|\040?)\d{5}) + ``` + + + +## Your data compliance responsibilities [#responsibilities] + +New Relic can't guarantee that this functionality completely resolves your data disclosure concerns, nor can we provide support for building out your NRQL queries. We recommend that you: + +* Review your drop filters and confirm they're accurately identifying and discarding the data you want dropped. +* Check that your drop filters are still dropping sensitive information after you've created them. + +While drop filters help ensure that personal information about your end users aren't stored in NRDB, creating the rules themselves imply the kinds of data you maintain, including the format of your data or systems. This is important when determining control permissions for certain users in your org, as certain permissions let users view and edit all information in the rules you create. + +## What's next? 
[#whats-next] + +Now that you've secured your customer's data, you can explore AI monitoring: + +* [Learn to explore your AI data](/docs/ai-monitoring/view-ai-data). +* Want to adjust your data ingest? [Learn about how to configure AI monitoring](/docs/ai-monitoring/customize-agent-ai-monitoring). +* Did you enable logs? Learn how to [obfuscate sensitive information](/docs/logs/ui-data/obfuscation-ui) from your logs, or [remove entire log messages if they contain sensitive information](/docs/logs/ui-data/drop-data-drop-filter-rules). \ No newline at end of file diff --git a/src/content/docs/ai-monitoring/intro-to-ai-monitoring.mdx b/src/content/docs/ai-monitoring/intro-to-ai-monitoring.mdx new file mode 100644 index 00000000000..b4b5d4990f4 --- /dev/null +++ b/src/content/docs/ai-monitoring/intro-to-ai-monitoring.mdx @@ -0,0 +1,53 @@ +--- +title: 'Introduction to AI monitoring' +metaDescription: 'AI monitoring lets you observe the AI-layer of your tech stack, giving you a holistic overview of the health and performance of your AI-powered app.' +freshnessValidatedDate: never +--- + +import aiTraceViewIntroPage from 'images/ai_screenshot-full_trace-view-intro-page.webp' + +import aiAIResponsesOverview from 'images/ai_screenshot-full_AI-responses-overview.webp' + +When people talk about artificial intelligence, they can mean different things. At New Relic, when we say AI, we mean the layer of your environment that uses a large language model (LLM) to generate a response when it receives an end user prompt. AI monitoring is an APM solution that gives you end-to-end visibility into your AI-powered app. + +With AI monitoring, you can measure the performance of the engine powering your AI app, so that you can ensure your users have the best possible experience. To get started, all you need is to install one of our APM agents and enable AI monitoring. + +A screenshot that shows the trace waterfall page for an individual AI response + +

+ Go to **[one.newrelic.com](https://one.newrelic.com) > AI monitoring > AI responses**, then select a response row. +
+ +## How does AI monitoring work? [#how-it-works] + +To get started with AI monitoring, you'll install one of our APM agents to instrument your app. Instrumentation means that your app can be measured, which lets the agent capture data about app behaviors. Once instrumented, you need to enable AI monitoring on the configuration level. + +Enabling AI monitoring allows the agent to recognize AI metadata associated with AI events. When your AI receives a prompt and returns a response, the agent can recognize data generated from external LLMs and vector stores, and parse out information about tokens usage. + +## Improve AI performance with AI monitoring [#improve-performance] + +A screenshot of the AI responses page + +
+  To get an overview of your AI-powered app's performance: Go to **[one.newrelic.com](https://one.newrelic.com) > AI monitoring > AI responses**.
+ +AI monitoring can help you answer critical questions about AI app performance: are your end users waiting too long for a response? Is there a recent spike in token usage? Are there patterns of negative user feedback around certain topics? With AI monitoring, you can see data specific to the AI-layer: + +* [Identify errors in specific prompt and response interactions](/docs/ai-monitoring/view-ai-data/#ai-responses) from the response table. If an error occurs, open the [trace waterfall view](/docs/ai-monitoring/view-ai-data/#ai-response-trace-view) to scope to the methods and calls your AI-powered app makes when generating a response. +* If your prompt engineers updated prompt parameters for your AI, you can [track whether token usage spiked or dropped after the update](/docs/ai-monitoring/view-ai-data). Use AI monitoring to help you make decisions that keep costs down. +* Maybe you're fine tuning your app in development, but you want cost and performance efficiency before it goes to production. If you're using different models in different app environments, you can [compare the cost and performance of your apps before deploying](/docs/ai-monitoring/view-ai-data/#model-comparison). + +## Get started with AI monitoring [#get-started] + +Ready to get started? Make sure to [confirm that you can instrument your AI library or framework](/docs/ai-monitoring/compatibility-requirements-ai-monitoring). You may need to update the agent if you've already instrumented your app. + +When you're ready, use our doc to [manually install AI monitoring](/install/ai-monitoring). This doc directs you to the relevant procedures for installing an APM agent, then walks you through configuring the agent for AI monitoring. 
\ No newline at end of file diff --git a/src/content/docs/ai-monitoring/view-ai-data.mdx b/src/content/docs/ai-monitoring/view-ai-data.mdx new file mode 100644 index 00000000000..f5f89ffc682 --- /dev/null +++ b/src/content/docs/ai-monitoring/view-ai-data.mdx @@ -0,0 +1,186 @@ +--- +title: 'View AI data in New Relic' +metaDescription: 'AI monitoring lets you observe the AI-layer of your tech stack, giving you a holistic overview of the health and performance of your AI-powered app.' +freshnessValidatedDate: never +--- + +import aiIntroAiUi from 'images/ai_screenshot-full_intro-ai-ui.webp' + +import aiTimeseriesBillboard from 'images/ai_screenshot-crop_timeseries-billboard.webp' + +import aiCroppedImageofAIBillboards from 'images/ai_screenshot-crop_billboard.webp' + +import aiCroppedImageofAItimeseries from 'images/ai_screenshot-crop_Cropped-image-of-AI-timeseries.webp' + +import aiAIEntitiesPage from 'images/ai_screenshot-crop_AI-entities-page.webp' + +import aiTopleveAiResponsesSummary from 'images/ai_screenshot-crop_topleve-ai-responses-summary.webp' + +import aiResponseTable from 'images/ai_screenshot-crop_response-table.webp' + +import aiTraceViewAiResponse from 'images/ai_screenshot-full_trace-view-ai-response.webp' + +import aiTraceWaterfallPageSpanDetails from 'images/ai_screenshot-crop_trace-waterfall-page-span-details.webp' + +import aiTraceWaterfallPageErrorDetails from 'images/ai_screenshot-crop_trace-waterfall-page-error-details.webp' + +import aiAiModelComparisonPage from 'images/ai_screenshot-full_ai-model-comparison-page.webp' + +Enabling [AI monitoring](/docs/ai-monitoring/intro-to-ai-monitoring) allows the agent to recognize and capture performance metrics and trace data about your app's AI-layer. With AI monitoring, you can track token usage, number of completions, and AI response time from your AI-powered app. 
When you see an error or an inaccurate response, you can scope to a trace-level view on a given prompt-response interaction to identify problems in the logic of your AI service. + +An image that shows the kind of data you get when you enable AI monitoring + +You can view your data by going to **[one.newrelic.com](https://one.newrelic.com) > All Capabilities > AI monitoring**. You can see your data from three different pages: + +* **AI responses**: Overview aggregated data from all your AI entities. Track your AI responses, times, and tokens, or see data about individual prompts and responses. +* **AI entities**: View a table summary of all entities reporting AI data. See entities with standard APM data such as error rate, throughput, and app response time. When you select an entity, you can start exploring the APM **AI responses** page. +* **Compare models**: Compare token usage, response time, and error rate between different models. If you're conducting A/B tests, you can get all the information you need to make decisions about your AI-powered app. + +## AI responses page [#ai-responses] + +The top-level **AI responses** page shows your AI data in aggregate. Aggregated data takes the average total responses, response times, and token usage per response across all entities reporting AI data. On this page, response refers to an output from your AI-powered app when given a prompt. + +If you own several apps with various implementations of different AI frameworks, you can get a general sense for how your AI models perform. + +### Track total responses, average response time, and token usage + +A cropped screenshot displaying the timeseries graphs and billboard info about AI data + +The three tiles show general performance metrics about your AI's responses. These tiles may not tell you the exact cause behind a problem, but they're useful for identifying anomalies in your app's performance. 
+
+<img title="Billboard tiles" alt="A cropped screenshot displaying billboard info about AI data" src={aiCroppedImageofAIBillboards}/>
+
+* If you notice a drop in total responses or an increase in average response time, it can indicate that some technology in your AI toolchain has prevented your AI-powered app from posting a response.
+* A drop or increase in average token usage per response can give you insight into how your model creates a response. Maybe it's pulling too much context, thus driving up token cost while generating its response. Maybe its responses are too spare, leading to lower token costs and unhelpful responses.
+
+### Adjust the timeseries graphs
+
+<img title="Timeseries graphs" alt="A cropped screenshot displaying timeseries info about AI data" src={aiCroppedImageofAItimeseries}/>
+
+You can refer to the time series graphs to better visualize when an anomalous behavior first appears.
+
+* Adjust the timeseries graph by dragging over a spike or drop. This scopes the timeseries to a certain time window.
+* Select the drop down to run comparative analysis for different performance parameters. You can choose between total responses, average response time, or average tokens per response.
+* If you've enabled the [feedback feature](/docs/ai-monitoring/customize-agent-ai-monitoring), you can scope the graphs to analyze responses by positive and negative feedback.
+
+### Evaluate individual AI responses
+
+Your AI response table organizes data about interactions between your end user and AI app. You can view when an interaction occurred, prompts paired with their responses, completion and token count, and which model received a prompt.
+
+<img title="AI response table" alt="A cropped screenshot displaying the response table from the AI responses view" src={aiResponseTable}/>
+
+You can adjust the table columns by clicking the cog icon in the upper right. This lets you choose the kinds of data you want to analyze.
+
+The response table is an entry point into viewing trace data about an individual response. Click a row in the table to open the trace view of a particular response.
+ +### AI response trace view + +A screenshot of the trace view for a particular AI response + +The AI response trace view gives you trace-level insights into how your app generates responses. You may want to look at the trace view to identify where an error occurred, or maybe you want to understand what led to negative feedback from a high token response. From the trace view, you can: + +* Choose between traces or logs. When you select logs, query within logs for text strings or attributes you want to investigate further. +* Toggle between response details or metadata. The response details column shows the user prompt and AI response so you can maintain context for your traces and spans. Metadata provides a list view for entity GUID, model, tokens, and vendor. +* When an error occurs, the waterfall view highlights its row in red. Select the row to open up span data, including the span's error details. + + + + + A screenshot that shows span details + + + + A screenshot that shows error details + + + + +## AI entities page [#entities] + +The AI entities page organizes all your entities currently reporting AI data into a table. This page displays your AI apps alongside response time, throughput, and error rate. + + A screenshot of the first page you see when you click AI Monitoring. View aggregated data, compare your AI models, or create drop filters. +
+  View the entities that report AI data: Go to **[one.newrelic.com](https://one.newrelic.com) > All Capabilities > AI monitoring**.
+ +Selecting an AI entity takes you to the APM summary page for that app. From the **APM summary page**, select **AI monitoring** in the left nav. + +### APM AI responses page [#apm-ai-response] + +Selecting an AI entity takes you to the APM summary page. To find your AI data, choose **AI responses** in the left nav. We recommend using this page when you've identified that a particular AI entity has contributed to anomalies. + +* The APM version of AI responses contains the same tiles, timeseries graphs, and response tables collected as the top-level AI responses page. +* Instead of showing aggregated data, the APM AI responses page shows data scoped to the service you selected from AI entities. +* While the top-level AI responses page lets you filter by service across all AI entities, the APM AI responses page limits filter functionality to the app's own attributes. + +To review how to explore your AI data, you can follow the same patterns explained in the previous [AI responses section](#ai-responses). + +## Model comparison page [#model-comparison] + +The model comparison page gives you the flexibility to analyze performance depending on the use case you're testing for. You can: + +* Compare how one model performs within an app against the average performance across all services. +* Conduct A/B tests when testing different prompts during prompt engineering. For example, comparing a model's performance and accuracy during one time window with one set of prompts against another time window with a second set of prompts. +* Evaluate how a model performed during a specific time window when customer traffic peaked. + +A screenshot showing the model comparison page. It has annotations to demonstrate the three steps to populate page with data. + +Keep in mind that this page scopes your model comparison data to a single account. If your organization has multiple accounts that own several AI-powered apps, you wouldn't be able to compare model data between those accounts. 
+
+### Understand model cost
+
+The model cost column breaks down completion events into two parts: the prompt given to the model and the final response the model delivers to the end user.
+
+* **Tokens per completion**: The token average for all completion events.
+* **Prompt tokens**: The token average for prompts. This token average includes prompts created by prompt engineers and end users.
+* **Completion tokens**: The number of tokens consumed by the model when it generates the response delivered to the end user.
+
+When analyzing this column, the sum of completion tokens and prompt tokens should equal the value in tokens per completion.
+
+## What's next? [#whats-next]
+
+Now that you know how to find your data, you can explore other features that AI monitoring has to offer.
+
+* Concerned about sensitive information? [Learn to set up drop filters](/docs/ai-monitoring/drop-sensitive-data).
+* If you want to forward user feedback information about your app's AI responses to New Relic, [follow our instructions to update your app code to get user feedback in the UI](/docs/ai-monitoring/customize-agent-ai-monitoring).
diff --git a/src/content/docs/alerts-applied-intelligence/applied-intelligence/anomaly-detection/custom-anomalies.mdx b/src/content/docs/alerts-applied-intelligence/applied-intelligence/anomaly-detection/custom-anomalies.mdx index ef32e6b3884..7833d83bddc 100644 --- a/src/content/docs/alerts-applied-intelligence/applied-intelligence/anomaly-detection/custom-anomalies.mdx +++ b/src/content/docs/alerts-applied-intelligence/applied-intelligence/anomaly-detection/custom-anomalies.mdx @@ -10,6 +10,7 @@ redirects: - /docs/alerts/new-relic-alerts/defining-conditions/create-anomaly-alert-conditions - /docs/alerts-applied-intelligence/new-relic-alerts/alert-conditions/create-anomaly-alert-conditions - /docs/alerts/new-relic-alerts/defining-conditions/create-anomaly-alert-conditions +- /docs/alerts/new-relic-alerts/configuring-alert-policies/create-anomaly-alert-conditions/ freshnessValidatedDate: never --- diff --git a/src/content/docs/alerts-applied-intelligence/applied-intelligence/incident-workflows/incident-workflows.mdx b/src/content/docs/alerts-applied-intelligence/applied-intelligence/incident-workflows/incident-workflows.mdx index afaba46fd61..27e963478db 100644 --- a/src/content/docs/alerts-applied-intelligence/applied-intelligence/incident-workflows/incident-workflows.mdx +++ b/src/content/docs/alerts-applied-intelligence/applied-intelligence/incident-workflows/incident-workflows.mdx @@ -199,7 +199,7 @@ The workflows feature is located under the **Alerts & AI** - Currently, you can only send JSON or numeric outputs to Webhook, Jira, and ServiceNow [destinations](/docs/alerts-applied-intelligence/notifications/destinations/), and only images to other destinations like Slack, Pagerduty, and email. To get better results in Webhook, Jira, and ServiceNow, use a query that has a single-value output, such as `count`, `min`, or `max`. 
+ Currently, you can only send JSON or numeric outputs to Webhook and Jira [destinations](/docs/alerts-applied-intelligence/notifications/destinations/). Also, you can only send images to other destinations like Slack, Pagerduty, ServiceNow (App), and email. To get better results in Webhook and Jira, use a query that has a single-value output, such as `count`, `min`, or `max`. Details on using enrichments: * Enrichments can give additional context on alert notifications by adding NRQL query results to them diff --git a/src/content/docs/alerts-applied-intelligence/new-relic-alerts/advanced-alerts/advanced-techniques/set-thresholds-alert-condition.mdx b/src/content/docs/alerts-applied-intelligence/new-relic-alerts/advanced-alerts/advanced-techniques/set-thresholds-alert-condition.mdx index e44713e594d..c07bfe90270 100644 --- a/src/content/docs/alerts-applied-intelligence/new-relic-alerts/advanced-alerts/advanced-techniques/set-thresholds-alert-condition.mdx +++ b/src/content/docs/alerts-applied-intelligence/new-relic-alerts/advanced-alerts/advanced-techniques/set-thresholds-alert-condition.mdx @@ -27,13 +27,13 @@ When you create a condition, you set personalized **thresholds** For a [condition](/docs/using-new-relic/welcome-new-relic/get-started/glossary#alert-condition), thresholds are the settings that determine what opens an [incident](/docs/new-relic-solutions/get-started/glossary/#alert-incident). Depending on a policy's [issue creation preference](/docs/alerts/new-relic-alerts/configuring-alert-policies/specify-when-new-relic-creates-incidents), and any [workflows](/docs/alerts-applied-intelligence/applied-intelligence/incident-workflows/incident-workflows/) you may have configured, an incident may result in: -* The creation of an issue. -* Notifications being sent. +* The creation of an issue +* Notifications being sent ### There are two types of thresholds: [#threshold-types] -* **Static** - one value set by you. 
-* **Anomaly** - An [Anomaly](/docs/alerts-applied-intelligence/new-relic-alerts/advanced-alerts/other-condition-types/create-anomaly-alert-conditions/#set-anomaly-thresholds) threshold uses past data to dynamically predict the data's near-future behavior. This will adjust over time as it learns the patterns of your data. +* **Static**: One value set by you. +* **Anomaly**: An [anomaly](/docs/alerts-applied-intelligence/new-relic-alerts/advanced-alerts/other-condition-types/create-anomaly-alert-conditions/#set-anomaly-thresholds) threshold uses past data to dynamically predict the data's near-future behavior. This will adjust over time as it learns the patterns of your data. ### Examples of thresholds: [#threshold-examples] diff --git a/src/content/docs/alerts-applied-intelligence/notifications/notification-integrations.mdx b/src/content/docs/alerts-applied-intelligence/notifications/notification-integrations.mdx index 5230f60c87b..0a80b0479c0 100644 --- a/src/content/docs/alerts-applied-intelligence/notifications/notification-integrations.mdx +++ b/src/content/docs/alerts-applied-intelligence/notifications/notification-integrations.mdx @@ -692,7 +692,9 @@ Custom details in PagerDuty alerts are automatically populated. ### Set up the ServiceNow application destination [#servicenow-destination] - + + If you don't have access to create a new destination, send an email to notificationWorkflows@newrelic.com with your account name and account number for assistance. + To create a ServiceNow destination, follow these steps: 1. [Download](https://store.servicenow.com/sn_appstore_store.do#!/store/application/d117597d1b9b9d9078faddf7b04bcba7/1.0.0?referer=%2Fstore%2Fsearch%3Flistingtype%3Dallintegrations%25253Bancillary_app%25253Bcertified_apps%25253Bcontent%25253Bindustry_solution%25253Boem%25253Butility%25253Btemplate%26q%3Dnew%2520relic&sl=sh) and install the new relic application on the ServiceNow store. 
diff --git a/src/content/docs/alerts-applied-intelligence/overview.mdx b/src/content/docs/alerts-applied-intelligence/overview.mdx index 1bd8e90b402..44e31e724c2 100644 --- a/src/content/docs/alerts-applied-intelligence/overview.mdx +++ b/src/content/docs/alerts-applied-intelligence/overview.mdx @@ -32,8 +32,6 @@ redirects: - /docs/alerts-applied-intelligence/applied-intelligence/incident-intelligence/basic-alerting-concepts/ - /docs/new-relic-one/use-new-relic-one/new-relic-ai/get-started-incident-intelligence - /docs/alerts-applied-intelligence/new-relic-alerts/get-started -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. freshnessValidatedDate: never --- diff --git a/src/content/docs/apis/intro-apis/introduction-new-relic-apis.mdx b/src/content/docs/apis/intro-apis/introduction-new-relic-apis.mdx index b592eb02d7b..f099b26e0f7 100644 --- a/src/content/docs/apis/intro-apis/introduction-new-relic-apis.mdx +++ b/src/content/docs/apis/intro-apis/introduction-new-relic-apis.mdx @@ -60,8 +60,6 @@ redirects: - /docs/full-stack-observability/instrument-everything/develop-your-own-integrations/new-relic-apis - /docs/introduction-new-relic-agent-sdk/ - /docs/apis -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. 
freshnessValidatedDate: never --- diff --git a/src/content/docs/apis/nerdgraph/examples/nerdgraph-api-notifications-channels.mdx b/src/content/docs/apis/nerdgraph/examples/nerdgraph-api-notifications-channels.mdx index e753035df69..d668449a470 100644 --- a/src/content/docs/apis/nerdgraph/examples/nerdgraph-api-notifications-channels.mdx +++ b/src/content/docs/apis/nerdgraph/examples/nerdgraph-api-notifications-channels.mdx @@ -119,7 +119,7 @@ The `channels` query allows you to paginate through all of your channels per acc actor { account(id: YOUR_ACCOUNT_ID) { aiNotifications { - channels(cursor: "") { + channels(cursor: "/8o0y2qiR54m6thkdgHgwg==:jZTXDFKbTkhKwvMx+CtsPVM=") { nextCursor entities { id diff --git a/src/content/docs/apis/nerdgraph/examples/nerdgraph-issues-api-via-github.mdx b/src/content/docs/apis/nerdgraph/examples/nerdgraph-issues-api-via-github.mdx index 8572155a01d..4385a748170 100644 --- a/src/content/docs/apis/nerdgraph/examples/nerdgraph-issues-api-via-github.mdx +++ b/src/content/docs/apis/nerdgraph/examples/nerdgraph-issues-api-via-github.mdx @@ -716,19 +716,15 @@ The `issues` query allows you to paginate through all of your issues per account "Anomaly: It was different from normal" ] }, - ``` + -In order to paginate through your data, you must include the `nextCursor` field on your initial query. - -With cursor pagination, you continue to make requests through the result data until the `nextCursor` returned from the response comes back empty. An empty response means you've arrived at the end of your results. - -Here's an example: +Note that the maximum number of issues that can be returned through pagination is set by the NRQL [LIMIT MAX](/docs.newrelic.com/docs/nrql/nrql-syntax-clauses-functions/#sel-limit), and filtering will be required if this limit is exceeded. 
### Initial Request @@ -1413,9 +1409,7 @@ The example below fetches `NewRelicIncident` incidents: ### Cursor pagination -In order to paginate through your data, you must include the nextCursor field on your initial query. - -With cursor pagination, you continue to make a request through the result data until the nextCursor that is returned from the response comes back empty. It means you get to the end of your results. +Note that the maximum number of incidents that can be returned through pagination is set by the NRQL [LIMIT MAX](/docs.newrelic.com/docs/nrql/nrql-syntax-clauses-functions/#sel-limit), and filtering will be required if this limit is exceeded. ### Filter Incidents diff --git a/src/content/docs/apis/nerdgraph/get-started/introduction-new-relic-nerdgraph.mdx b/src/content/docs/apis/nerdgraph/get-started/introduction-new-relic-nerdgraph.mdx index d4079146bca..f0a6c729ea5 100644 --- a/src/content/docs/apis/nerdgraph/get-started/introduction-new-relic-nerdgraph.mdx +++ b/src/content/docs/apis/nerdgraph/get-started/introduction-new-relic-nerdgraph.mdx @@ -16,8 +16,6 @@ redirects: - /docs/apis/graphql-api/get-started/introduction-new-relic-nerdgraph - /docs/apis/nerdgraph - /docs/apis/graphql-api -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. freshnessValidatedDate: never --- diff --git a/src/content/docs/apm/agents/go-agent/configuration/go-agent-configuration.mdx b/src/content/docs/apm/agents/go-agent/configuration/go-agent-configuration.mdx index 01832eafb07..3ac8862ba4b 100644 --- a/src/content/docs/apm/agents/go-agent/configuration/go-agent-configuration.mdx +++ b/src/content/docs/apm/agents/go-agent/configuration/go-agent-configuration.mdx @@ -69,28 +69,28 @@ Here are detailed descriptions of each configuration method: Note the use of `os.Getenv` to read your license key from the environment rather than hard-coding it as a string literal value passed to `newrelic.ConfigLicense`. 
We recommend that you don't place license keys or other sensitive information in your source code, as that may result in them being stored in your SCM repository and possibly revealed to unauthorized parties. 2. Update values on the `newrelic.Config` struct to configure your application using `newrelic.ConfigOption`s. These are functions that accept a pointer to the `newrelic.Config` struct. Add additional `newrelic.ConfigOption`s to further configure your application. For example, you can use one of the predefined options to do common configurations: - ```go - app, err := newrelic.NewApplication( - newrelic.ConfigAppName("Your Application Name"), - newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")), - // add debug level logging to stdout - newrelic.ConfigDebugLogger(os.Stdout), - ) - ``` + ```go + app, err := newrelic.NewApplication( + newrelic.ConfigAppName("Your Application Name"), + newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")), + // add debug level logging to stdout + newrelic.ConfigDebugLogger(os.Stdout), + ) + ``` 3. 
Or, you can create your own `newrelic.ConfigOption` to do more complex configurations: - ```go - app, err := newrelic.NewApplication( - newrelic.ConfigAppName("Your Application Name"), - newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")), - newrelic.ConfigDebugLogger(os.Stdout), - func(config *newrelic.Config) { - // add more specific configuration of the agent within a custom ConfigOption - config.HighSecurity = true - config.CrossApplicationTracer.Enabled = false - }, - ) - ``` + ```go + app, err := newrelic.NewApplication( + newrelic.ConfigAppName("Your Application Name"), + newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")), + newrelic.ConfigDebugLogger(os.Stdout), + func(config *newrelic.Config) { + // add more specific configuration of the agent within a custom ConfigOption + config.HighSecurity = true + config.CrossApplicationTracer.Enabled = false + }, + ) + ``` @@ -968,9 +968,11 @@ Not all possible configuration options may be set via environment variables. The (See [note 2](#table-note-two) below) - `NEW_RELIC_LOG` + + `NEW_RELIC_LOG` - `NEW_RELIC_LOG_LEVEL` + `NEW_RELIC_LOG_LEVEL` + `ModuleDependencyMetrics.Enabled` @@ -1199,6 +1201,170 @@ Not all possible configuration options may be set via environment variables. The Setting NEW_RELIC_METADATA_SERVICE_VERSION will create a tag, `tag.service.version` on event data. In this context, the service version is the version of your code that is deployed, in many cases a semantic version such as 1.2.3 but not always. Sending this information allows you to facet your telemetry by the version of the software deployed so you can quickly identify which versions of your software are producing the errors. +## AI monitoring [#ai-monitoring] + +This section includes Go agent configurations for setting up AI monitoring. + + +If distributed tracing is disabled or high security mode is enabled, AI monitoring will not collect AI data. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Type + + Boolean +
+ Default + + `false` +
+ [Environ variable](#environment) + + `NEW_RELIC_AI_MONITORING_ENABLED` +
+ Configuration function + + `newrelic.ConfigAIMonitoringEnabled` +
+ + When set to `true`, enables AI monitoring. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Type + + Boolean +
+ Default + + `true` +
+ [Environ variable](#environment) + + `NEW_RELIC_AI_MONITORING_STREAMING_ENABLED` +
+ Configuration function + + `newrelic.ConfigAIMonitoringStreamingEnabled` +
+ +When set to `true`, enables the agent to capture streamed responses. If set to `false`, the agent won't capture event data about streamed responses, but the agent can still capture metrics and spans. The span duration will end when the LLM function call exits. When set to `true`, the span duration ends when the final result is read from the stream. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Type + + Boolean +
+ Default + + `true` +
+ [Environ variable](#environment) + + `NEW_RELIC_AI_MONITORING_RECORD_CONTENT_ENABLED` +
+ Configuration function + + `newrelic.ConfigAIMonitoringRecordContentEnabled` +
+ +If set to `false`, the agent will omit input and output content (like text strings from prompts and responses) captured in LLM events. This is an optional security setting if you don’t want to record sensitive data sent to and received from your LLMs. + +
+
+ ## Custom events configuration [#custom-insights-events-settings] You can create custom events and make them available for querying and analysis. @@ -1671,7 +1837,7 @@ The following settings are used to configure the error collector: [Set in](#options) - < td> + `newrelic.Config` struct @@ -1703,7 +1869,7 @@ The following settings are used to configure the error collector: ```go app, err := newrelic.NewApplication( newrelic.ConfigAppName("Your Application Name"), - newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")), + newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")), func(config *newrelic.Config) { config.ErrorCollector.IgnoreStatusCodes = []int{100, http.StatusAccepted} }, diff --git a/src/content/docs/apm/agents/go-agent/get-started/go-agent-compatibility-requirements.mdx b/src/content/docs/apm/agents/go-agent/get-started/go-agent-compatibility-requirements.mdx index 68a8e3d3d6f..0e21db1bbe8 100644 --- a/src/content/docs/apm/agents/go-agent/get-started/go-agent-compatibility-requirements.mdx +++ b/src/content/docs/apm/agents/go-agent/get-started/go-agent-compatibility-requirements.mdx @@ -528,12 +528,25 @@ The Go agent integrates with other features to give you observability across you - Integration + Capability + + + [AI monitoring](/docs/ai-monitoring/intro-to-ai-monitoring) + + + + If you have version 3.31.0 or higher of Go agent, you can collect AI data from certain AI libraries and frameworks: + + * [Go OpenAI library](https://github.com/sashabaranov/go-openai) versions 3.4.0 and above + * [AWS SDK for Go v2](https://github.com/aws/aws-sdk-go-v2) versions 1.6.0 and above + + + [Infrastructure monitoring](/docs/infrastructure/new-relic-infrastructure/getting-started/introduction-new-relic-infrastructure) @@ -566,7 +579,7 @@ The Go agent integrates with other features to give you observability across you - [](/docs/browser/new-relic-browser/getting-started/introduction-new-relic-browser) + [Browser 
monitoring](/docs/browser/new-relic-browser/getting-started/introduction-new-relic-browser) diff --git a/src/content/docs/apm/agents/java-agent/additional-installation/include-java-agent-jvm-argument.mdx b/src/content/docs/apm/agents/java-agent/additional-installation/include-java-agent-jvm-argument.mdx index 88a0a23f857..adcf8c282fe 100644 --- a/src/content/docs/apm/agents/java-agent/additional-installation/include-java-agent-jvm-argument.mdx +++ b/src/content/docs/apm/agents/java-agent/additional-installation/include-java-agent-jvm-argument.mdx @@ -15,8 +15,6 @@ redirects: - /docs/agents/java-agent/installation/include-java-agent-jvm-switch - /docs/agents/java-agent/installation/include-java-agent-jvm-argument tocUnlisted: true -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. freshnessValidatedDate: never --- diff --git a/src/content/docs/apm/agents/java-agent/configuration/java-agent-configuration-config-file.mdx b/src/content/docs/apm/agents/java-agent/configuration/java-agent-configuration-config-file.mdx index d109d2612cb..0ecaf2413ab 100644 --- a/src/content/docs/apm/agents/java-agent/configuration/java-agent-configuration-config-file.mdx +++ b/src/content/docs/apm/agents/java-agent/configuration/java-agent-configuration-config-file.mdx @@ -17,8 +17,6 @@ redirects: - /docs/agents/java-agent/custom-instrumentation/file-location - /docs/java-agent/configuration - /docs/agents/java-agent/configuration -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. freshnessValidatedDate: never --- @@ -4537,6 +4535,39 @@ Slow transaction detection is set in the `slow_transactions` and can be [overrid Determines how long a transaction has to take in milliseconds for a `SlowTransaction` event to be reported. + + + + + + + + + + + + + + + + +
+ Type + + Boolean +
+ Default + + `true` +
+ + If this is set to `true`, every transaction will be checked for exceeding the defined threshold on + transaction completion. Note that if a large number of transactions exceed the threshold, + this can be computationally expensive since a stack trace is sent with every `SlowTransaction` event. +
## Span events diff --git a/src/content/docs/apm/agents/java-agent/getting-started/compatibility-requirements-java-agent.mdx b/src/content/docs/apm/agents/java-agent/getting-started/compatibility-requirements-java-agent.mdx index c363c3088dc..4e36457828c 100644 --- a/src/content/docs/apm/agents/java-agent/getting-started/compatibility-requirements-java-agent.mdx +++ b/src/content/docs/apm/agents/java-agent/getting-started/compatibility-requirements-java-agent.mdx @@ -191,6 +191,7 @@ The agent automatically instruments these frameworks and libraries: * SnsClient 2.1.0 to latest * Spray 1.3.1 to latest * Spring 3.0.0.RELEASE to latest + * Spring Batch 4.0.0 to latest * Spring Web Services from 1.5.7 to latest * Spring Webflux 5.0.0.RELEASE to latest * SqsClient 2.1.0 to latest @@ -198,7 +199,7 @@ The agent automatically instruments these frameworks and libraries: * Thrift 0.8.0 to latest * Vert.x 3.2.0 to 4.5.x * ZIO - * Scala 2.13: 1.0.9 to 2.0.0-M2 + * Scala 2.13: 1.0.9 to latest diff --git a/src/content/docs/apm/agents/java-agent/getting-started/introduction-new-relic-java.mdx b/src/content/docs/apm/agents/java-agent/getting-started/introduction-new-relic-java.mdx index 266844501eb..47712f3d810 100644 --- a/src/content/docs/apm/agents/java-agent/getting-started/introduction-new-relic-java.mdx +++ b/src/content/docs/apm/agents/java-agent/getting-started/introduction-new-relic-java.mdx @@ -19,8 +19,6 @@ redirects: - /docs/apm/java - /docs/java/amF2YS1hZ2 - /docs/apm/agents/java-agent -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. 
freshnessValidatedDate: never --- diff --git a/src/content/docs/apm/agents/net-agent/configuration/net-agent-configuration.mdx b/src/content/docs/apm/agents/net-agent/configuration/net-agent-configuration.mdx index 939f205f905..67a04a61715 100644 --- a/src/content/docs/apm/agents/net-agent/configuration/net-agent-configuration.mdx +++ b/src/content/docs/apm/agents/net-agent/configuration/net-agent-configuration.mdx @@ -1786,6 +1786,7 @@ The `utilization` element supports the following attributes: Use these options to configure which elements of your application and environment to instrument. New Relic for .NET supports the following categories of instrumentation options: * [Instrumentation element](#instrumentation-element) +* [Rules element](#instrumentation-rules) * [Applications element (instrumentation)](#application-instrumentation) * [Attributes element](#agent-attributes) @@ -1793,6 +1794,45 @@ Use these options to configure which elements of your application and environmen The `instrumentation` element is a child of the `configuration` element. By default, the .NET agent instruments IIS asp worker processes and Microsoft Azure web and worker roles. To instrument other processes, see [Instrumenting custom applications](/docs/agents/net-agent/features/instrumenting-custom-applications). +### Rules element (instrumentation) [#instrumentation-rules] + + +This feature is available in .NET Agent 10.21.0 and above. + + +The `rules` element is a child of the `instrumentation` element. The `rules` element supports any number of `ignore` child elements, which instructs the Profiler to NOT instrument any methods defined in the specified assembly. Methods defined in other assemblies will still be instrumented. + +```xml + + + + + +``` + +The ignore rule allows you to optionally define a class name. In the following example, only methods defined in MyNamespace.MyClass in the assembly MyAssembly will be ignored. 
Other methods in other classes both inside that assembly and within other assemblies will not be ignored by the profiler. + +```xml + + + + + +``` + +More than one ignore rule can be specified. The following example disables both the Confluent Kafka and StackExchange Redis instrumentations. + +```xml + + + + + + +``` + +Please note that custom instrumentation cannot be ignored via the `rules` element. + ### Applications element (instrumentation) [#application-instrumentation] The `applications` element is a child of the `instrumentation` element. The `applications` element supports `application` child elements which specify which non-web apps to instrument. The `application` element contains a `name` attribute. diff --git a/src/content/docs/apm/agents/net-agent/getting-started/net-agent-compatibility-requirements-net-core.mdx b/src/content/docs/apm/agents/net-agent/getting-started/net-agent-compatibility-requirements-net-core.mdx index e4f10d4c4dd..a9cad612f95 100644 --- a/src/content/docs/apm/agents/net-agent/getting-started/net-agent-compatibility-requirements-net-core.mdx +++ b/src/content/docs/apm/agents/net-agent/getting-started/net-agent-compatibility-requirements-net-core.mdx @@ -480,7 +480,7 @@ Use SqlClient from [System.Data.SqlClient](https://www.nuget.org/packages/System **Microsoft.Data.SqlClient** * Minimum supported version: 1.0.19239.1 -* Verified compatible versions: 1.0.19239.1, 2.1.5, 3.1.1, 4.1.1, 5.0.1, 5.1.1 +* Verified compatible versions: 1.0.19239.1, 2.1.5, 3.1.1, 4.1.1, 5.0.1, 5.1.1, 5.2.0 @@ -521,7 +521,7 @@ Prior versions of Npgsql may also be instrumented, but duplicate and/or missing Minimum supported version: 2.3.0 -Verified compatible versions: 2.3.0, 2.8.1, 2.13.1, 2.14.1, 2.17.1, 2.19.0, 2.20.0, 2.21.0, 2.22.0, 2.23.0, 2.23.1 +Verified compatible versions: 2.3.0, 2.8.1, 2.13.1, 2.14.1, 2.17.1, 2.19.0, 2.20.0, 2.21.0, 2.22.0, 2.23.0, 2.23.1, 2.24.0 Beginning in agent version 10.12.0, the following methods added in or after driver 
version 2.7 are instrumented: * IMongoCollection.CountDocuments[Async] @@ -573,7 +573,7 @@ Use [MySql.Data](https://www.nuget.org/packages/MySql.Data/) or [MySQL Connector Minimum supported version: .0.488 -Verified compatible versions: 1.0.488, 1.1.608, 1.2.6, 2.0.601, 2.1.58, 2.2.88, 2.6.66, 2.6.116, 2.7.4, 2.7.10, 2.7.17 +Verified compatible versions: 1.0.488, 1.1.608, 1.2.6, 2.0.601, 2.1.58, 2.2.88, 2.6.66, 2.6.116, 2.7.4, 2.7.10, 2.7.17, 2.7.33 @@ -796,7 +796,7 @@ Use [Elastic.Clients.Elasticsearch](https://www.nuget.org/packages/Elastic.Clien 9.7.0 - 2.0.10, 2.0.12, 2.0.13, 2.0.14 + 2.0.10, 2.0.12, 2.0.13, 2.0.14, 2.0.16 diff --git a/src/content/docs/apm/agents/net-agent/getting-started/net-agent-compatibility-requirements-net-framework.mdx b/src/content/docs/apm/agents/net-agent/getting-started/net-agent-compatibility-requirements-net-framework.mdx index a8cd3765c2a..2f9125c1c21 100644 --- a/src/content/docs/apm/agents/net-agent/getting-started/net-agent-compatibility-requirements-net-framework.mdx +++ b/src/content/docs/apm/agents/net-agent/getting-started/net-agent-compatibility-requirements-net-framework.mdx @@ -548,7 +548,7 @@ Use SqlClient from [System.Data.SqlClient](https://www.nuget.org/packages/System **Microsoft.Data.SqlClient** * Minimum supported version: 1.0.19239.1 -* Verified compatible versions: 1.0.19239.1, 2.1.5, 3.1.1, 4.1.1, 5.0.1, 5.1.1 +* Verified compatible versions: 1.0.19239.1, 2.1.5, 3.1.1, 4.1.1, 5.0.1, 5.1.1, 5.2.0 **System.Data** * Minimum supported version: .NET Framework 4.6.2 @@ -591,7 +591,7 @@ Known incompatible versions: Instance details aren't available in lower version Minimum supported version: 2.3.0 -Verified compatible versions: 2.3.0, 2.8.1, 2.13.1, 2.14.1, 2.17.1, 2.19.0, 2.20.0, 2.21.0, 2.22.0, 2.23.0, 2.23.1 +Verified compatible versions: 2.3.0, 2.8.1, 2.13.1, 2.14.1, 2.17.1, 2.19.0, 2.20.0, 2.21.0, 2.22.0, 2.23.0, 2.23.1. 
2.24.0 Beginning in agent version 10.12.0, the following methods added in or after driver version 2.7 are instrumented: * `IMongoCollection.CountDocuments[Async]` @@ -695,7 +695,7 @@ Prior versions of Npgsql may also be instrumented, but duplicate and/or missing * Minimum supported version: 1.0.488 - * Verified compatible versions: 1.0.488, 1.1.608, 1.2.6, 2.0.601, 2.1.58, 2.2.88, 2.6.66, 2.6.116, 2.7.4, 2.7.10, 2.7.17 + * Verified compatible versions: 1.0.488, 1.1.608, 1.2.6, 2.0.601, 2.1.58, 2.2.88, 2.6.66, 2.6.116, 2.7.4, 2.7.10, 2.7.17, 2.7.33 @@ -863,7 +863,7 @@ Use [Elastic.Clients.Elasticsearch](https://www.nuget.org/packages/Elastic.Clien 9.7.0 - 1.2.10, 2.0.5, 2.0.14 + 1.2.10, 2.0.5, 2.0.14, 2.0.16 diff --git a/src/content/docs/apm/agents/net-agent/net-agent-api/seterrorgroupcallback-net-agent-api.mdx b/src/content/docs/apm/agents/net-agent/net-agent-api/seterrorgroupcallback-net-agent-api.mdx index 5aff5d96440..b18334a0028 100644 --- a/src/content/docs/apm/agents/net-agent/net-agent-api/seterrorgroupcallback-net-agent-api.mdx +++ b/src/content/docs/apm/agents/net-agent/net-agent-api/seterrorgroupcallback-net-agent-api.mdx @@ -71,7 +71,7 @@ An empty string may be returned for the error group name when the error can't be `$callback` - {'_Func,string>_'} + _{'Func,string>'}_ diff --git a/src/content/docs/apm/agents/net-agent/troubleshooting/missing-net-async-metrics.mdx b/src/content/docs/apm/agents/net-agent/troubleshooting/missing-net-async-metrics.mdx index 6bb441373cb..ebdd245e21e 100644 --- a/src/content/docs/apm/agents/net-agent/troubleshooting/missing-net-async-metrics.mdx +++ b/src/content/docs/apm/agents/net-agent/troubleshooting/missing-net-async-metrics.mdx @@ -19,7 +19,7 @@ freshnessValidatedDate: never ## Problem -You do not see [async transactions](/docs/agents/net-agent/additional-installation/async-support-net) for WebApi, HttpClient, SqlCommand, SqlDataReader, NpgsqlCommand, or custom instrumentation. 
This problem typically occurs for apps created with [New Relic's .NET agent](/docs/agents/net-agent/getting-started/compatibility-requirements-net-framework-agent#net-version) under .NET Framework 4.0 or earlier, then migrated to .NET Framework 4.5 or higher. +You do not see [async transactions](/docs/agents/net-agent/additional-installation/async-support-net) for `WebApi`, `HttpClient`, `SqlCommand`, `SqlDataReader`, `NpgsqlCommand`, or custom instrumentation. This problem typically occurs for apps created with [New Relic's .NET agent](/docs/agents/net-agent/getting-started/compatibility-requirements-net-framework-agent#net-version) under .NET Framework 4.0 or earlier, then migrated to .NET Framework 4.5 or higher. ## Solution @@ -28,7 +28,7 @@ You do not see [async transactions](/docs/agents/net-agent/additional-installati A specific `appSetting` or `system.web` setting is required if you are using: * WebApi1 or WebApi2 -* Async methods in HttpClient, SqlCommand, SqlDataReader, or NpgsqlCommand +* Async methods in `HttpClient`, `SqlCommand`, `SqlDataReader`, or `NpgsqlCommand` * Async-related custom transactions or custom instrumentation * New Relic .NET agent version 5.11.53.0 or higher * .NET Framework 4.5 or higher as the target for your app @@ -92,4 +92,4 @@ To enforce additional checks, add the following to `web.config`: Async instrumentation is disabled if the legacy integrated pipeline is present. Before .NET 4.5, the integrated pipeline could cause deadlocks. 
For more information about this .NET Framework bug, see: * [Why is `HttpContext.Current` null after await?](https://stackoverflow.com/questions/18383923/why-is-httpcontext-current-null-after-await) -* [All about <httpRuntime targetFramework>](https://devblogs.microsoft.com/dotnet/all-about-httpruntime-targetframework/) +* [All about ``](https://devblogs.microsoft.com/dotnet/all-about-httpruntime-targetframework/) diff --git a/src/content/docs/apm/agents/nodejs-agent/getting-started/compatibility-requirements-nodejs-agent.mdx b/src/content/docs/apm/agents/nodejs-agent/getting-started/compatibility-requirements-nodejs-agent.mdx index 67c96dd7cc3..12ba3f55813 100644 --- a/src/content/docs/apm/agents/nodejs-agent/getting-started/compatibility-requirements-nodejs-agent.mdx +++ b/src/content/docs/apm/agents/nodejs-agent/getting-started/compatibility-requirements-nodejs-agent.mdx @@ -752,12 +752,26 @@ The Node.js agent integrates with other features to give you observability acros - Integration + Capability + + + [AI monitoring](/docs/ai-monitoring/intro-to-ai-monitoring) + + + + If you have version 11.13.0 of the Node.js agent, you can collect AI data from certain AI libraries and frameworks: + + * [OpenAI Node.js API library](https://www.npmjs.com/package/openai/v/4.0.0-beta.4) versions 4.0.0 and above. 
If your model uses streaming, the Node.js agent supports versions 4.12.2 and above + * [AWS SDK for JavaScript BedrockRuntime Client](https://www.npmjs.com/package/@aws-sdk/client-bedrock-runtime) versions 3.474.0 and above + * [LangChain.js](https://www.npmjs.com/package/langchain/v/0.1.17) versions 0.1.17 and above + + + [Browser monitoring](/docs/browser/new-relic-browser/getting-started/introduction-new-relic-browser) diff --git a/src/content/docs/apm/agents/nodejs-agent/getting-started/introduction-new-relic-nodejs.mdx b/src/content/docs/apm/agents/nodejs-agent/getting-started/introduction-new-relic-nodejs.mdx index f1203ad0c6b..60953da5b22 100644 --- a/src/content/docs/apm/agents/nodejs-agent/getting-started/introduction-new-relic-nodejs.mdx +++ b/src/content/docs/apm/agents/nodejs-agent/getting-started/introduction-new-relic-nodejs.mdx @@ -15,8 +15,6 @@ redirects: - /docs/agents/nodejs-agent/table-of-contents - /docs/apm/agents/nodejs-agent - /docs/agents/nodejs-agent -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. freshnessValidatedDate: never --- diff --git a/src/content/docs/apm/agents/nodejs-agent/installation-configuration/install-nodejs-agent.mdx b/src/content/docs/apm/agents/nodejs-agent/installation-configuration/install-nodejs-agent.mdx index ce8226d6b7f..6f4ae611802 100644 --- a/src/content/docs/apm/agents/nodejs-agent/installation-configuration/install-nodejs-agent.mdx +++ b/src/content/docs/apm/agents/nodejs-agent/installation-configuration/install-nodejs-agent.mdx @@ -14,8 +14,6 @@ redirects: - /docs/agents/nodejs-agent/installation-and-configuration/installing-and-maintaining-nodejs - /docs/agents/nodejs-agent/installation-configuration/installing-maintaining-nodejs - /docs/agents/nodejs-agent/installation-configuration/install-maintain-nodejs -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. 
freshnessValidatedDate: never --- diff --git a/src/content/docs/apm/agents/nodejs-agent/installation-configuration/nodejs-agent-configuration.mdx b/src/content/docs/apm/agents/nodejs-agent/installation-configuration/nodejs-agent-configuration.mdx index 830786fa15d..5329e87c8d8 100644 --- a/src/content/docs/apm/agents/nodejs-agent/installation-configuration/nodejs-agent-configuration.mdx +++ b/src/content/docs/apm/agents/nodejs-agent/installation-configuration/nodejs-agent-configuration.mdx @@ -919,6 +919,139 @@ This section defines the Node.js agent variables in the order they typically app +## AI monitoring [#ai-monitoring] + +This section includes Node.js agent configurations for setting up AI monitoring. + + +You must enable [distributed tracing](/docs/apm/agents/nodejs-agent/installation-configuration/nodejs-agent-configuration/#dt-main) to capture AI data. It's turned on by default in Node.js agents 8.3.0 and higher. If you've enabled high security mode, AI monitoring will not work. + + + + + + + + + + + + + + + + + + + + + + + + +
+ Type + + Boolean +
+ Default + + `false` +
+ [Environ variable](#environment) + + `NEW_RELIC_AI_MONITORING_ENABLED` +
+ + When set to `true`, enables AI monitoring. Allows the agent to capture LLM event data. + +
+ + + + + + + + + + + + + + + + + + + + + +
+ Type + + Boolean +
+ Default + + `true` +
+ [Environ variable](#environment) + + `NEW_RELIC_AI_MONITORING_STREAMING_ENABLED` +
+ +When set to `false`, disables instrumentation for streamed LLM data. When set to `true`, captures streamed data for LLM events. + +
+ + + + + + + + + + + + + + + + + + + + + +
+ Type + + Boolean +
+ Default + + `true` +
+ [Environ variable](#environment) + + `NEW_RELIC_AI_MONITORING_RECORD_CONTENT_ENABLED` +
+ +If set to `false`, the agent omits input and output content (like text strings from prompts and responses) captured in LLM events. This is an optional security setting if you don’t want to record sensitive data sent to and received from your LLMs. +
+
+ ## Audit logging [#audit_log] This section defines the Node.js agent variables in the order they typically appear in the `audit_log: {` section of your app's `newrelic.js` configuration file. @@ -2490,7 +2623,7 @@ This section defines the Node.js agent variables in the order they typically app Defines the maximum number of events the agent collects per minute. If there are more than this number, the agent collects a statistical sampling. - We do not recommend configuring past 10,000. The server will cap data at 10,000 per-minute. + We don't recommend configuring past 10,000. The server will cap data at 10,000 per-minute. This configuration had different behavior in agent versions lower than 6.0.0. See [`max_samples_stored` (DEPRECATED)](#tx_events_max_samples_stored_legacy) for agent versions 5.x or lower. @@ -3004,7 +3137,8 @@ This section defines the Node.js agent variables in the order they typically app - Defines the maximum number of custom events the agent collects per minute. If the number of custom events exceeds this limit, the agent collects a statistical sampling. + * Defines the maximum number of custom events the agent collects per minute. If the number of custom events exceeds this limit, the agent collects a statistical sampling. + * When configuring the agent for [AI monitoring](/docs/ai-monitoring/intro-to-ai-monitoring), set to max value `100000`. Ensures that the maximum amount of LLM events are captured. Increasing this limit may increase memory usage. @@ -3787,9 +3921,9 @@ The `grpc` section controls the behavior of how the gRPC server is instrumented. - Defines the maximum number of events the agent collects per minute. If there are more than this number, the agent collects a statistical sampling. - - We do not recommend configuring past 10k. The server will cap data at 10k per-minute. + * Defines the maximum number of events the agent collects per minute. If there are more than this number, the agent collects a statistical sampling. 
+ * We do not recommend configuring past 10k. The server will cap data at 10k per-minute. + * When configuring the agent for [AI monitoring](/docs/ai-monitoring/intro-to-ai-monitoring), set to max value `10000`. Ensures that the maximum amount of traces are captured. `max_samples_stored` configuration settings require [Node.JS agent version 8.3.0 or higher](/docs/apm/agents/nodejs-agent/installation-configuration/update-nodejs-agent/). diff --git a/src/content/docs/apm/agents/php-agent/advanced-installation/docker-other-container-environments-install-php-agent.mdx b/src/content/docs/apm/agents/php-agent/advanced-installation/docker-other-container-environments-install-php-agent.mdx index 875300213f7..317227fcc4d 100644 --- a/src/content/docs/apm/agents/php-agent/advanced-installation/docker-other-container-environments-install-php-agent.mdx +++ b/src/content/docs/apm/agents/php-agent/advanced-installation/docker-other-container-environments-install-php-agent.mdx @@ -9,8 +9,6 @@ redirects: - /docs/agents/php-agent/advanced-installation/docker-other-container-environments-install-php-agent - /docs/install-php-agent-docker - /docs/agents/php-agent/advanced-installation/install-php-agent-docker -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. freshnessValidatedDate: never --- diff --git a/src/content/docs/apm/agents/php-agent/getting-started/introduction-new-relic-php.mdx b/src/content/docs/apm/agents/php-agent/getting-started/introduction-new-relic-php.mdx index 3194e406e84..28623ece45d 100644 --- a/src/content/docs/apm/agents/php-agent/getting-started/introduction-new-relic-php.mdx +++ b/src/content/docs/apm/agents/php-agent/getting-started/introduction-new-relic-php.mdx @@ -20,8 +20,6 @@ redirects: - /docs/agents/php-agent - /docs/apm/agents/php-agent - /docs/agents/php-agent/getting-started/introduction-new-relic-php -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. 
freshnessValidatedDate: never --- diff --git a/src/content/docs/apm/agents/php-agent/getting-started/php-agent-compatibility-requirements.mdx b/src/content/docs/apm/agents/php-agent/getting-started/php-agent-compatibility-requirements.mdx index 5d218930eee..c7c2d1cb79c 100644 --- a/src/content/docs/apm/agents/php-agent/getting-started/php-agent-compatibility-requirements.mdx +++ b/src/content/docs/apm/agents/php-agent/getting-started/php-agent-compatibility-requirements.mdx @@ -9,8 +9,6 @@ translate: metaDescription: A summary of the New Relic PHP agent's system requirements and the supported PHP frameworks and libraries. redirects: - /docs/agents/php-agent/getting-started/php-agent-compatibility-requirements -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. freshnessValidatedDate: never --- @@ -191,17 +189,21 @@ The following frameworks are supported: - CakePHP 2.x + [Drupal 7.x, 8.x, 9.1-9.5](/docs/agents/php-agent/frameworks-libraries/drupal-specific-functionality) - Magento 1.x and 2.x, CE and EE + Magento 2.x, CE and EE - CodeIgniter 2.x + Joomla 3.x + + + Joomla 3.x is not supported on PHP 8.x. + @@ -211,7 +213,7 @@ The following frameworks are supported: - [Drupal 6.x, 7.x, 8.x, 9.1-9.5](/docs/agents/php-agent/frameworks-libraries/drupal-specific-functionality) + Laminas 3.x @@ -221,20 +223,17 @@ The following frameworks are supported: - Joomla 3.x - - - Joomla 3.x is not supported on PHP 8.x. 
- + Laravel 6.x, 7.x, 8.x, 9.x, and 10.x + - Symfony 3.x, 4.x, and 5.x + Symfony 4.x and 5.x - Laminas 3.x + Lumen 6.x, 7.x, 8.x, 9.x, and 10.x @@ -243,25 +242,15 @@ The following frameworks are supported: - - Laravel 6.x, 7.x, 8.x, 9.x, and 10.x - - - Lumen 6.x, 7.x, 8.x, 9.x, and 10.x - + - Zend Framework 1.x, 2.x, and 3.x - - - - Yii 1.x + Zend Framework 3.x - diff --git a/src/content/docs/apm/agents/python-agent/configuration/python-agent-configuration.mdx b/src/content/docs/apm/agents/python-agent/configuration/python-agent-configuration.mdx index c10e60770a7..c6c1803fc2c 100644 --- a/src/content/docs/apm/agents/python-agent/configuration/python-agent-configuration.mdx +++ b/src/content/docs/apm/agents/python-agent/configuration/python-agent-configuration.mdx @@ -1374,6 +1374,138 @@ For more information, see [Python agent attributes](/docs/agents/python-agent/at +## AI monitoring [#ai-monitoring] + +This section includes Python agent configurations for setting up AI monitoring. + + +You must enable [distributed tracing](/docs/apm/agents/nodejs-agent/installation-configuration/nodejs-agent-configuration/#dt-main) to capture AI data. It's turned on by default in Python agent versions 7.0.0.166 and higher. + + + + + + + + + + + + + + + + + + + + + + + + +
+ Type + + Boolean +
+ Default + + `false` +
+ [Environ variable](#environment) + + `NEW_RELIC_AI_MONITORING_ENABLED` +
+ + When set to `true`, enables AI Large Language Model monitoring. +
+ + + + + + + + + + + + + + + + + + + + + +
+ Type + + Boolean +
+ Default + + `true` +
+ [Environ variable](#environment) + + `NEW_RELIC_AI_MONITORING_STREAMING_ENABLED` +
+ +When set to `false`, disables instrumentation that records summary and message events for streamed Large Language Model data. + +
+ + + + + + + + + + + + + + + + + + + + + +
+ Type + + Boolean +
+ Default + + `true` +
+ [Environ variable](#environment) + + `NEW_RELIC_AI_MONITORING_RECORD_CONTENT_ENABLED` +
+ +If set to `false`, the agent will omit input and output content (like text strings from prompts and responses) captured in LLM events. This is an optional security setting if you don’t want to record sensitive data sent to and received from your LLMs. +
+
+ ## Transaction tracer configuration [#txn-tracer-settings] @@ -3373,7 +3505,7 @@ Event harvest configuration settings include: - `1200` + `3600` @@ -3399,7 +3531,8 @@ Event harvest configuration settings include: - Limit for custom events per minute sent by an instance of the Python agent to New Relic. Custom events are created through the Python Telemetry SDK. + * Limits how many custom events per minute that an instance of the Python agent can send to New Relic. + * When configuring the agent for [AI monitoring](/docs/ai-monitoring/intro-to-ai-monitoring), set to max value `100000` to ensure the agent captures the maximum amount of LLM events. - Limit for span events per minute sent by an instance of the Python agent to New Relic. + * Limit for span events per minute sent by an instance of the Python agent to New Relic. + * When configuring the agent for [AI monitoring](/docs/ai-monitoring/intro-to-ai-monitoring), set to max value `10000` to ensure that the agent captures the maximum amount of distributed traces. + + + + [AI monitoring](/docs/ai-monitoring/intro-to-ai-monitoring) + + + + If you have version 9.9.0 of the Python agent, you can collect AI data from certain AI libraries and frameworks: + + * [OpenAI](https://pypi.org/project/openai/) library versions 0.28.0 and above. + * [Boto3 AWS SDK for Python](https://pypi.org/project/boto3/) version 1.28.57 and above. + * [LangChain](https://pypi.org/project/langchain/) versions 0.1.0 and above. 
+ + + [Browser monitoring](/docs/browser/new-relic-browser/getting-started/introduction-new-relic-browser) diff --git a/src/content/docs/apm/agents/python-agent/getting-started/introduction-new-relic-python.mdx b/src/content/docs/apm/agents/python-agent/getting-started/introduction-new-relic-python.mdx index 4e3b9ad0a30..0450520d67d 100644 --- a/src/content/docs/apm/agents/python-agent/getting-started/introduction-new-relic-python.mdx +++ b/src/content/docs/apm/agents/python-agent/getting-started/introduction-new-relic-python.mdx @@ -18,8 +18,6 @@ redirects: - /docs/apm/python - /docs/agents/python-agent - /docs/apm/agents/python-agent -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. freshnessValidatedDate: never --- diff --git a/src/content/docs/apm/agents/python-agent/python-agent-api/backgroundtask-python-agent-api.mdx b/src/content/docs/apm/agents/python-agent/python-agent-api/backgroundtask-python-agent-api.mdx index df26b7a1d3f..43c1f5498df 100644 --- a/src/content/docs/apm/agents/python-agent/python-agent-api/backgroundtask-python-agent-api.mdx +++ b/src/content/docs/apm/agents/python-agent/python-agent-api/backgroundtask-python-agent-api.mdx @@ -1,12 +1,12 @@ --- title: background_task (Python agent API) type: apiDoc -shortDescription: Used to instrument a background task or other non-web transaction. +shortDescription: Used to instrument a background task or other non-web transaction of finite run time. tags: - Agents - Python agent - Python agent API -metaDescription: 'Python API: Used to instrument a background task or other non-web transaction.' +metaDescription: 'Python API: Used to instrument a background task or other non-web transaction of finite run time.' 
redirects: - /docs/agents/python-agent/python-agent-api/backgroundtask-python-agent-api - /docs/agents/python-agent/python-agent-api/background_task @@ -19,11 +19,11 @@ freshnessValidatedDate: never newrelic.agent.background_task(application=None, name=None, group=None) ``` -Used to instrument a background task or other non-web transaction. +Used to instrument a background task or other non-web transaction of finite run time. ## Description -This Python decorator can be used to instrument background tasks or other [non-web transactions](/docs/apm/transactions/intro-transactions/monitor-background-processes-other-non-web-transactions). This is typically used to instrument non-web activity like worker processes, job-based systems, and standalone scripts. Transactions marked as background tasks are displayed as non-web transactions in the APM UI and separated from web transactions. +This Python decorator can be used to instrument background tasks or other [non-web transactions](/docs/apm/transactions/intro-transactions/monitor-background-processes-other-non-web-transactions) of finite run time. This is typically used to instrument non-web activity like worker processes, job-based systems, and standalone scripts. Transactions marked as background tasks are displayed as non-web transactions in the APM UI and separated from web transactions. If a function isn't already instrumented, you can use [`function_trace()`](/docs/apm/agents/python-agent/python-agent-api/functiontrace-python-agent-api) to create a function trace for that function within a background task. Or you can use the [config file](/docs/apm/agents/python-agent/custom-instrumentation/python-custom-instrumentation-config-file/#listing_functions) to instrument uninstrumented functions. @@ -239,3 +239,6 @@ result = task(*args, **kwargs) ``` In this example, if this call was made in a web transaction being monitored by an agent, the call will reclassify the web transaction as a background task. 
If, however, the call was made in a background thread or coroutine, then it would start the recording of a fresh background task transaction that tracks what occurs until the call returns. + +## Troubleshooting +If the background task's transaction is not showing up in the UI, this could be because the transaction that was created by the background task was never exited or the task ran too long. If a background task runs for over 20 minutes, it is dropped by the server because it is considered too old to keep. Background tasks should not run for over 20 minutes; if they do, they should be broken up into multiple background tasks. diff --git a/src/content/docs/apm/agents/python-agent/python-agent-api/currenttraceid-python-agent.mdx b/src/content/docs/apm/agents/python-agent/python-agent-api/currenttraceid-python-agent.mdx new file mode 100644 index 00000000000..853fe18c74d --- /dev/null +++ b/src/content/docs/apm/agents/python-agent/python-agent-api/currenttraceid-python-agent.mdx @@ -0,0 +1,42 @@ +--- +title: current_trace_id (Python agent API) +type: apiDoc +shortDescription: Returns the trace ID of the current transaction. +tags: + - Agents + - Python agent + - Python agent API +metaDescription: 'New Relic Python API: Returns the trace ID of the current transaction, which is used by some other calls.' +redirects: + - /docs/agents/python-agent/python-agent-api/currenttraceid-python-agent-api + - /docs/agents/python-agent/python-agent-api/current_trace_id +freshnessValidatedDate: never +--- + +## Syntax [#syntax] + +```py + newrelic.agent.current_trace_id() +``` + +Returns the trace ID of the current transaction or `None` if no transaction exists. + +## Description [#description] + +Use `current_trace_id` to retrieve the trace ID of the current [transaction](/docs/accounts-partnerships/education/getting-started-new-relic/glossary#transaction). + +## Return values [#return-values] + +Returns the trace ID of the current transaction. 
Returns `None` if there is no active transaction. + +## Examples [#examples] + +### Get the current trace ID [#trace-id-example] + +```py + import newrelic.agent + + @newrelic.agent.background_task() + def main(): + trace_id = newrelic.agent.current_trace_id() +``` diff --git a/src/content/docs/apm/agents/python-agent/python-agent-api/recordllmfeedbackevent-python-agent-api.mdx b/src/content/docs/apm/agents/python-agent/python-agent-api/recordllmfeedbackevent-python-agent-api.mdx new file mode 100644 index 00000000000..de0f1bc4258 --- /dev/null +++ b/src/content/docs/apm/agents/python-agent/python-agent-api/recordllmfeedbackevent-python-agent-api.mdx @@ -0,0 +1,133 @@ +--- +title: record_llm_feedback_event (Python agent API) +type: apiDoc +shortDescription: Records LLM feedback event +tags: + - Agents + - Python agent + - Python agent API +metaDescription: 'Python API: This call records a Large Language Model (LLM) feedback event for querying in the AI Monitoring UI.' +redirects: + - /docs/agents/python-agent/python-agent-api/recordllmfeedbackevent-python-agent-api + - /docs/agents/python-agent/python-agent-api/record_llm_feedback_event +freshnessValidatedDate: never +--- + +## Syntax [#syntax] + +```py + newrelic.agent.record_llm_feedback_event(trace_id, rating, category=None, message=None, metadata=None) +``` + +Records custom feedback events for AI Large Language Model applications. + +## Requirements [#requirements] + +Python agent version 9.8.0 or higher. + +## Description [#description] + +This API records a feedback event `LlmFeedbackMessage` that can be viewed and queried in the New Relic UI. Feedback events correlate trace IDs between an AI-generated message and the feedback an end user submitted about it. 
To correlate messages with feedback, you can obtain the trace ID of the active transaction via a call to [`current_trace_id`](/docs/apm/agents/python-agent/python-agent-api/currenttraceid-python-agent-api/) right after the call that generates the AI message. Pass the trace ID to the feedback call later when a user provides feedback. + +In many cases, the endpoint for AI messages are recorded in different places from the feedback endpoint. They may happen in different transactions. It's important to: + + 1. Make sure that the trace ID is captured inside the endpoint that generates the AI message. + 2. Pass that trace ID inside the endpoint that records the feedback. + +## Parameters [#parameters] + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Parameter + + Description +
+ `trace_id` + + _string_ + + Required. ID of the trace where the chat completion(s) related to the feedback occurred. This ID can be obtained via a call to [`current_trace_id`](/docs/apm/agents/python-agent/python-agent-api/currenttraceid-python-agent-api/). +
+ `rating` + + _string_ or _int_ + + Required. Rating provided by an end user (ex: “Good/Bad”, “1-10”). +
+ `category` + + _string_ + + Optional. Category of the feedback provided by the end user (ex: “informative”, “inaccurate”). +
+ `message` + + _string_ + + Optional. Freeform text feedback from an end user. +
+ `metadata` + + _dict_ + + Optional. Set of key-value pairs to store any other desired data to submit with the feedback event. +
+ +## Return values [#return-valuess] + +None. + +## Examples [#examples] + +### Obtain trace ID and record feedback + +Example of recording a feedback event: + + ```py + import newrelic.agent + + def get_message(request): + trace_id = newrelic.agent.current_trace_id() + + def post_feedback(request): + newrelic.agent.record_llm_feedback_event(trace_id=request.trace_id, rating=request.rating, metadata= {"my_key": "my_val"}) + ``` \ No newline at end of file diff --git a/src/content/docs/apm/agents/python-agent/python-agent-api/setllmtokencountcallback-python-agent-api.mdx b/src/content/docs/apm/agents/python-agent/python-agent-api/setllmtokencountcallback-python-agent-api.mdx new file mode 100644 index 00000000000..e3471af1366 --- /dev/null +++ b/src/content/docs/apm/agents/python-agent/python-agent-api/setllmtokencountcallback-python-agent-api.mdx @@ -0,0 +1,165 @@ +--- +title: set_llm_token_count_callback (Python agent API) +type: apiDoc +shortDescription: Registers a callback to calculate LLM token counts +tags: + - Agents + - Python agent + - Python agent API +metaDescription: 'Python API: This call registers a callback to calculate LLM token counts' +redirects: + - /docs/agents/python-agent/python-agent-api/setllmtokencountcallback-python-agent-api + - /docs/agents/python-agent/python-agent-api/set_llm_token_count_callback +freshnessValidatedDate: never +--- + +## Syntax [#syntax] + +```py + newrelic.agent.set_llm_token_count_callback(callback, application=None) +``` + +Registers a callback function which will be used for calculating token counts on Large Language Model (LLM) events. + +## Requirements [#requirements] + +Python agent version 9.8.0 or higher. + +## Description [#description] + +This API registers a callback to calculate and store token counts on `LlmEmbedding` and `LlmChatCompletionMessage` events. + +* This function should be used when `ai_monitoring.record_content.enabled` is set to `false`. 
This setting prevents the agent from sending AI content to the New Relic server, where the token counts are attached server side. +* If you'd still like to capture token counts for LLM events, you can implement a callback in your app code to determine the token counts locally and send this information to New Relic. + +In most cases, this API will be called exactly once, but you can make multiple calls to this API. Each fresh call made to the endpoint overwrites the previously registered callback with the new one that is provided. To unset the callback completely, pass `None` in place of the original callback. + + +## API Parameters [#api-parameters] + + + + + + + + + + + + + + + + + + + + + + + +
+ Parameter + + Description +
+ `callback` + + _callable_ or _None_ + + Required. The callback to calculate token counts. To unset the current callback, pass `None` in instead of a callback function. +
+ `application` + + _object_ + + Optional. The specific application object to associate the API call with. An application object can be obtained using the [`newrelic.agent.application`](/docs/apm/agents/python-agent/python-agent-api/application-python-agent-api/) function. +
+ +## Return values [#return-values] + +None. + +## Callback Requirements [#callback-requirements] + +Provided callbacks must return a positive integer token count value or no token count will be captured on LLM events. + +## Callback Parameters [#callback-parameters] + + + + + + + + + + + + + + + + + + + + + + + +
+ Parameter + + Description +
+ `model` + + _string_ + + Required. The name of the LLM model. +
+ `content` + + _string_ + + Required. The message content/ prompt or embedding input. +
+ +## Examples [#examples] + +### Calculating token counts and registering the callback + +Example with tiktoken: + + ```py + import newrelic.agent + def token_count_callback(model, content): + """ + Calculate token counts locally based on the model being used and the content. + This callback will be invoked for each message sent or received during a LLM call. + If the application supports more than one model, it may require finding libraries for + each model to support token counts appropriately. + + Arguments: + model -- name of the LLM model + content -- the LLM message content + """ + import tiktoken + + try: + enc = tiktoken.encoding_for_model(model) + except KeyError: + return None # Unknown model + return len(enc.encode(content)) + + newrelic.agent.set_llm_token_count_callback(token_count_callback) + ``` + +Example API usage with an application object passed in: + + ```py + application = newrelic.agent.register_application(timeout=10.0) + newrelic.agent.set_llm_token_count_callback(token_count_callback, application) + ``` diff --git a/src/content/docs/apm/agents/ruby-agent/api-guides/ruby-ai-monitoring.mdx b/src/content/docs/apm/agents/ruby-agent/api-guides/ruby-ai-monitoring.mdx new file mode 100644 index 00000000000..63f58e9fe0e --- /dev/null +++ b/src/content/docs/apm/agents/ruby-agent/api-guides/ruby-ai-monitoring.mdx @@ -0,0 +1,100 @@ +--- +title: AI monitoring APIs +tags: + - Agents + - Ruby agent + - API guides +metaDescription: 'For information about customizing New Relic''s Ruby agent for AI monitoring.' +freshnessValidatedDate: never +redirects: + - /docs/apm/agents/ruby-agent/api-guides/ruby-ai-monitoring-apis +--- + +When you've instrumented your app for AI monitoring, the New Relic Ruby agent automatically collects many AI metrics, but also provides APIs for collecting information on token count and user feedback. + + + AI monitoring APIs are available in Ruby agent version 9.8.0 and higher. 
+ + +## Token count [#token-count] + +You can set a callback proc for calculating `token_count` attributes for LlmEmbedding and LlmChatCompletionMessage events, and then pass that information to New Relic using the `NewRelic::Agent.set_llm_token_count_callback` API. + +This API should be called only once to set a callback for use with all LLM token calculations. If it is called multiple times, each new callback will replace the old one. The proc will be called with a single hash as its input argument and must return an Integer representing the number of tokens used for that particular prompt, completion message, or embedding. Values less than or equal to 0 will not be attached to an event. + +The hash has the following keys: + + * `:model` (String) - The name of the LLM model + * `:content` (String) - The message content or prompt + +The following example code demonstrates setting a callback that calculates token count and passing that callback to `NewRelic::Agent.set_llm_token_count_callback`. + +```rb + require 'tiktoken_ruby' # Example library for counting GPT model tokens used + + token_count_callback = proc do |hash| + return unless hash[:model].includes?('gpt') + + enc = Tiktoken.encoding_for_model(hash[:model]) + enc.encode(hash[:content]).length + end + + NewRelic::Agent.set_llm_token_count_callback(token_count_callback) + +## User feedback [#user-feedback] + +AI monitoring can correlate trace IDs between a generated message from your AI and the message feedback from an end user using `NewRelic::Agent.record_llm_feedback_event`. 
+ +`NewRelic::Agent.record_llm_feedback_event` accepts the following arguments: + + * `trace_id` (required) - ID of the trace where the chat completion(s) related to the feedback occurred + * `rating` (required) - Rating provided by an end user (ex: 'Good', 'Bad', 1, 2, 5, 8, 10) + * `category` (optional) - Category of the feedback as provided by the end user (ex: “informative”, “inaccurate”) + * `message` (optional) - Freeform text feedback from an end user + * `metadata` (optional) - Set of key-value pairs to store any other desired data to submit with the feedback event + +This API requires the current `trace_id` to correlate messages with feedback, which can be obtained using [NewRelic::Agent::Tracer.current_trace_id](https://www.rubydoc.info/gems/newrelic_rpm/NewRelic/Agent/Tracer#current_trace_id-class_method). + +The following example code uses a Sinatra app to demonstrate collecting the required user feedback and trace_id of a current transaction (along with this API's optional parameters), and then passing those parameters to `NewRelic::Agent.record_llm_feedback_event`. 
+ +```rb +responses = {} + +get '/chat-completion' do + @response_message = client.chat( + parameters: { + model: 'gpt-3.5-turbo', + messages: [ + {'role': 'system', 'content': 'You are a helpful assistant.'}, + ], + temperature: 0.7, + } + ) + + # trace_id must be obtained within the current transaction + trace_id = NewRelic::Agent::Tracer.current_trace_id + responses[@response_message.id] = trace_id + + render(@response_message) +end + +post '/feedback' do + trace_id = responses[@response_message.id] + rating = 1 + category = 'feedback-test' + message = 'Good talk' + metadata = {user: 'new'} + + halt(404) if !responses[@response_message.id] + + NewRelic::Agent.record_llm_feedback_event( + trace_id: responses[@response_message.id], + rating: 1, + category: 'feedback-test', + message: 'Good talk', + metadata: {user: 'new'} + ) + + render('Feedback Recorded') +end +``` diff --git a/src/content/docs/apm/agents/ruby-agent/configuration/ruby-agent-configuration.mdx b/src/content/docs/apm/agents/ruby-agent/configuration/ruby-agent-configuration.mdx index 44c7b8857ab..9b0df55987a 100644 --- a/src/content/docs/apm/agents/ruby-agent/configuration/ruby-agent-configuration.mdx +++ b/src/content/docs/apm/agents/ruby-agent/configuration/ruby-agent-configuration.mdx @@ -752,6 +752,144 @@ For information on ignored and expected errors, [see this page on Error Analytic +## AI monitoring [#ai-monitoring] + +This section includes Ruby agent configurations for setting up AI monitoring. + + +You need to enable distributed tracing to capture trace and feedback data. It is turned on by default in Ruby agents 8.0.0 and higher. + + + + + + + + + + + + + + + + + + + + + + + + +
+ Type + + Boolean +
+ Default + + `false` +
+ [Environ variable](#environment) + + `NEW_RELIC_AI_MONITORING_ENABLED` +
+ + When set to `true`, enables AI monitoring. +
+ + + + + + + + + + + + + + + + + + + + + +
+ Type + + String +
+ Default + + `auto` +
+ [Environ variable](#environment) + + `NEW_RELIC_INSTRUMENTATION_RUBY_OPENAI` +
+ + * In high security mode, defaults to `false`. + * Enables the agent to instrument an app. We support `prepend` for module prepending and `chain` for alias method chaining. + * The Ruby agent uses `prepend` by default. + * If you have multiple libraries that update the same class, you can update this configuration to `chain`. + +
+ + + + + + + + + + + + + + + + + + + + + +
+ Type + + Boolean +
+ Default + + `true` +
+ [Environ variable](#environment) + + `NEW_RELIC_AI_MONITORING_RECORD_CONTENT_ENABLED` +
+ + * If set to `false`, agent omits input and output content (like text strings from prompts and responses) captured in LLM events. + * Drops `content` attribute from `LlmChatCompletionMessage` events + * Drops `input` attribute froom `LlmEmbedding` events + * This is an optional security setting if you don’t want to record sensitive data sent to and received from your LLMs. +
+
+ ## Browser Monitoring [#browser-monitoring] The [page load timing](/docs/browser/new-relic-browser/page-load-timing/page-load-timing-process) feature (sometimes referred to as real user monitoring or RUM) gives you insight into the performance real users are experiencing with your website. This is accomplished by measuring the time it takes for your users' browsers to download and render your web pages by injecting a small amount of JavaScript code into the header and footer of each page. @@ -913,6 +1051,43 @@ Valid values (ordered lowest to highest): +## Ai Monitoring [#ai-monitoring] + +This section includes Ruby agent configurations for setting up AI monitoring. [Distributed tracing](/docs/apm/agents/ruby-agent/configuration/ruby-agent-configuration/#distributed-tracing) must be enabled to capture trace and feedback data. It is turned on by default in Ruby agents 8.0.0 and higher. + + + + + + + + + + +
TypeBoolean
Default`false`
Environ variable`NEW_RELIC_AI_MONITORING_ENABLED`
+ + If `false`, all LLM instrumentation (OpenAI only for now) will be disabled and no metrics, events, or spans will be sent. AI Monitoring is automatically disabled if `high_security` mode is enabled. +
+ + + + + + + + +
TypeBoolean
Default`true`
Environ variable`NEW_RELIC_AI_MONITORING_RECORD_CONTENT_ENABLED`
+ + If `false`, LLM instrumentation (OpenAI only for now) will not capture input and output content on specific LLM events. + +The excluded attributes include: + * `content` from LlmChatCompletionMessage events + * `input` from LlmEmbedding events + +
+ +
+ ## Attributes [#attributes] [Attributes](/docs/features/agent-attributes) are key-value pairs containing information that determines the properties of an event or transaction. These key-value pairs can be viewed within transaction traces in APM, traced errors in APM, transaction events in dashboards, and page views in dashboards. You can customize exactly which attributes will be sent to each of these destinations @@ -1333,8 +1508,6 @@ If `true`, enables [cross-application tracing](/docs/agents/ruby-agent/features/ ## Custom Events [#custom-events] - - @@ -1358,9 +1531,10 @@ If `true`, enables [cross-application tracing](/docs/agents/ruby-agent/features/ - Specify a maximum number of custom events to buffer in memory at a time. - + * Specify a maximum number of custom events to buffer in memory at a time. + * When configuring the agent for [AI monitoring](/docs/ai-monitoring/intro-to-ai-monitoring), set to max value `100000`. Ensures the agent captures the maximum amount of LLM events. +
## Datastore Tracer [#datastore-tracer] @@ -2111,6 +2285,18 @@ Use these settings to toggle instrumentation types during agent startup. Controls auto-instrumentation of `Net::HTTP` at start-up. May be one of: `auto`, `prepend`, `chain`, `disabled`. + + + + + + + +
TypeString
Default`auto`
Environ variable`NEW_RELIC_INSTRUMENTATION_RUBY_OPENAI`
+ + Controls auto-instrumentation of the ruby-openai gem at start-up. May be one of: `auto`, `prepend`, `chain`, `disabled`. +
+ @@ -2231,6 +2417,18 @@ Use these settings to toggle instrumentation types during agent startup. Controls auto-instrumentation of Stripe at startup. May be one of: `enabled`, `disabled`. + +
+ + + + + +
TypeString
Default`"auto"`
Environ variable`NEW_RELIC_INSTRUMENTATION_VIEW_COMPONENT`
+ + Controls auto-instrumentation of ViewComponent at startup. May be one of: `auto`, `prepend`, `chain`, `disabled`. +
+ @@ -2505,7 +2703,7 @@ Use these settings to toggle instrumentation types during agent startup. -## Span Events [#span-events] +## Span events [#span-events] @@ -2544,15 +2742,14 @@ Use these settings to toggle instrumentation types during agent startup.
- Defines the maximum number of span events reported from a single harvest. Any Integer between `1` and `10000` is valid. + * Defines the maximum number of span events reported from a single harvest. Any Integer between `1` and `10000` is valid. + * When configuring the agent for [AI monitoring](/docs/ai-monitoring/intro-to-ai-monitoring), set to max value `10000`. Ensures that the agent captures the maximum amount of distributed traces.
## Strip Exception Messages [#strip-exception-messages] - - diff --git a/src/content/docs/apm/agents/ruby-agent/getting-started/ruby-agent-requirements-supported-frameworks.mdx b/src/content/docs/apm/agents/ruby-agent/getting-started/ruby-agent-requirements-supported-frameworks.mdx index cbe0187c6b1..b70c3b0bcfa 100644 --- a/src/content/docs/apm/agents/ruby-agent/getting-started/ruby-agent-requirements-supported-frameworks.mdx +++ b/src/content/docs/apm/agents/ruby-agent/getting-started/ruby-agent-requirements-supported-frameworks.mdx @@ -827,12 +827,22 @@ The Ruby agent integrates with other New Relic capabilities to give you end-to-e - Integration + Capability + + + [AI monitoring](/docs/ai-monitoring/intro-to-ai-monitoring) + + + + You can collect AI data using the Ruby agent and the [`ruby-openai` gem](https://github.com/alexrudall/ruby-openai). This feature was introduced in Ruby agent version 9.8.0 and supports `ruby-openai` versions 3.4.0 and above. + + + [Browser monitoring](/docs/browser/new-relic-browser/getting-started/introduction-browser-monitoring) diff --git a/src/content/docs/apm/new-relic-apm/apdex/apdex-measure-user-satisfaction.mdx b/src/content/docs/apm/new-relic-apm/apdex/apdex-measure-user-satisfaction.mdx index da80bc7e305..df1abe91a98 100644 --- a/src/content/docs/apm/new-relic-apm/apdex/apdex-measure-user-satisfaction.mdx +++ b/src/content/docs/apm/new-relic-apm/apdex/apdex-measure-user-satisfaction.mdx @@ -16,8 +16,6 @@ redirects: - /docs/apm/new-relic-apm/apdex/apdex-measuring-user-satisfaction - /docs/apm/new-relic-apm/apdex/view-your-apdex-score/ - /docs/apm/new-relic-apm/apdex/change-your-apdex-settings/ -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. 
freshnessValidatedDate: never --- diff --git a/src/content/docs/browser/browser-monitoring/browser-pro-features/session-traces-explore-webpages-life-cycle.mdx b/src/content/docs/browser/browser-monitoring/browser-pro-features/session-traces-explore-webpages-life-cycle.mdx index fe6ecacea25..1b07be504e2 100644 --- a/src/content/docs/browser/browser-monitoring/browser-pro-features/session-traces-explore-webpages-life-cycle.mdx +++ b/src/content/docs/browser/browser-monitoring/browser-pro-features/session-traces-explore-webpages-life-cycle.mdx @@ -89,11 +89,11 @@ Session traces help you understand: - Core Web Vitals + Core web vitals - Look at the timing segments for first input delay and largest contentful paint to identify performance issues affecting your Core Web Vitals scores. + Look at the timing segments for interaction to next paint and largest contentful paint to identify performance issues affecting your [core web vitals](/docs/tutorial-improve-site-performance/guide-to-monitoring-core-web-vitals). @@ -230,11 +230,11 @@ The following table describes each segment of the session trace, with a link to - [First input delay](/attribute-dictionary/?event=PageViewTiming&attribute=firstInputDelay) + [Interaction to next paint (INP)](/attribute-dictionary/?event=PageViewTiming&attribute=interactionToNextPaint) - Core Web Vitals: The time from when the request began to when the first browser response to user input was recorded. + Core web vitals: Measures how quickly a web page visually responds to user actions like clicks or taps. 
diff --git a/src/content/docs/browser/browser-monitoring/getting-started/browser-summary-page.mdx b/src/content/docs/browser/browser-monitoring/getting-started/browser-summary-page.mdx index 7274dfb0df2..3810d746b5a 100644 --- a/src/content/docs/browser/browser-monitoring/getting-started/browser-summary-page.mdx +++ b/src/content/docs/browser/browser-monitoring/getting-started/browser-summary-page.mdx @@ -22,7 +22,7 @@ import browserAddAlert from 'images/browser_screenshot-crop_browser-add-alert.we Our **Summary** page helps you troubleshoot issues with the real user browser performance of your app. Use the **Summary** page to: -* Get core web vitals at a glance, including largest contentful paint (LCP), first input delay (FID), and cumulative layout shift (CLS). +* Get core web vitals at a glance, including largest contentful paint (LCP), interaction to next paint (INP), and cumulative layout shift (CLS). * View trends in an app's browser-side performance. * Quickly troubleshoot page load timing issues. * Go directly to other browser UI pages to examine problems and issues in more detail. @@ -48,7 +48,7 @@ To view a summary of a browser app's performance: Use the **Core web vitals** charts to understand how your browser performs according to [Google's Core Web Vitals](https://web.dev/vitals/#core-web-vitals). These include: * LCP: Largest contentful paint, which measures loading performance -* FID: First input delay, which measures interactivity between user input and browser response +* INP: Interaction to next paint, which measures interactivity between user input and browser response * CLS: Cumulative layout shift, which measures visual stability in the browser page Higher scores can help your organization save money and influence SEO. Lower scores can affect your users' perceptions of your organization's website, which could result in dissatisfied customers or lost business opportunities. 
@@ -60,7 +60,7 @@ To learn more about monitoring core web vitals, see our [Guide to core web vital In addition to the charts with core web vitals data, evaluate your users' experience with your app by using the following data: * **User time on the site** -* **User-centric page load times** and **Longest first input delay by URL**: To get more details about the [page load timing process](/docs/browser/new-relic-browser/page-load-timing-resources/page-load-timing-process), click the chart's title to go directly to the [**Page views** UI](/docs/browser/new-relic-browser/additional-standard-features/page-views-examine-page-performance). +* **User-centric page load times** and **Longest interaction to next paint by URL**: To get more details about the [page load timing process](/docs/browser/new-relic-browser/page-load-timing-resources/page-load-timing-process), click the chart's title to go directly to the [**Page views** UI](/docs/browser/new-relic-browser/additional-standard-features/page-views-examine-page-performance). 
## Locate browser performance problems [#performance] diff --git a/src/content/docs/browser/browser-monitoring/getting-started/introduction-browser-monitoring.mdx b/src/content/docs/browser/browser-monitoring/getting-started/introduction-browser-monitoring.mdx index 30abf1850a1..ae87f6bafb4 100644 --- a/src/content/docs/browser/browser-monitoring/getting-started/introduction-browser-monitoring.mdx +++ b/src/content/docs/browser/browser-monitoring/getting-started/introduction-browser-monitoring.mdx @@ -17,7 +17,6 @@ redirects: - /docs/browser/browser-monitoring - /docs/browser - /docs/browser/new-relic-browser -hidePageTools: true freshnessValidatedDate: never --- diff --git a/src/content/docs/browser/browser-monitoring/installation/install-browser-monitoring-agent.mdx b/src/content/docs/browser/browser-monitoring/installation/install-browser-monitoring-agent.mdx index 37a5796a611..02642c85b1b 100644 --- a/src/content/docs/browser/browser-monitoring/installation/install-browser-monitoring-agent.mdx +++ b/src/content/docs/browser/browser-monitoring/installation/install-browser-monitoring-agent.mdx @@ -17,8 +17,6 @@ redirects: - /docs/browser/new-relic-browser/installation/install-browser-monitoring-agent - /docs/browser/new-relic-browser/installation/monitor-amp-pages-new-relic-browser - /docs/monitor-amp-pages-new-relic-browser -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. freshnessValidatedDate: never --- diff --git a/src/content/docs/browser/new-relic-browser/browser-apis/start.mdx b/src/content/docs/browser/new-relic-browser/browser-apis/start.mdx index 54a2cff8c9d..bec432aee29 100644 --- a/src/content/docs/browser/new-relic-browser/browser-apis/start.mdx +++ b/src/content/docs/browser/new-relic-browser/browser-apis/start.mdx @@ -22,18 +22,17 @@ Browser API used to start agent features when running in a deferred state. 
## Requirements * Browser Lite, Pro, or Pro+SPA agent (v1.239.0 or higher) - - -The configuration required to use this API is not currently connected to the larger deployment system within New Relic. As such, calling this API will only have an effect in copy/paste or npm browser installations until further changes are made. - - + + + The configuration required to use this API is not currently connected to the larger deployment system within New Relic. As such, calling this API will only have an effect in copy/paste or npm browser installations until further changes are made. + ## Description -Features can be loaded in a `deferred` state, which can be controlled by setting the appropriate features' `autoStart` property to `false` in the configuration block `NREUM.init.` used by the agent. This feature state means events will be observed and stored in the agent, but *will not be harvested to NR1 until told to do so* with the `.start()` API method. See [Feature Names]('#feature-names') for a list of feature names. See [Examples]('#examples') for examples showing how to set features into a deferred state. +Features can be loaded in a `deferred` state, which can be controlled by setting the appropriate features' `autoStart` property to `false` in the configuration block `NREUM.init.` used by the agent. This feature state means events will be observed and stored in the agent, but *will not be harvested to NR1 until told to do so* with the `.start()` API method. See [Feature Names](#feature-names) for a list of feature names. See [Examples](#examples) for examples showing how to set features into a deferred state. Upon executing this function with a valid value, the browser agent will start the relevant features that have been deferred by the `autoStart: false` configuration. If called with no arguments, the method will start all features that have been deferred. 
-If called with a list of strings representing the feature names, the feature names matching the strings will be started. See [Feature Names]('#feature-names') for a list of feature names. +If called with a list of strings representing the feature names, the feature names matching the strings will be started. See [Feature Names](#feature-names) for a list of feature names. ## Parameters diff --git a/src/content/docs/browser/new-relic-browser/browser-pro-features/browsers-problem-patterns-type-or-platform.mdx b/src/content/docs/browser/new-relic-browser/browser-pro-features/browsers-problem-patterns-type-or-platform.mdx index b23ad59b749..f6e4c1fbfc4 100644 --- a/src/content/docs/browser/new-relic-browser/browser-pro-features/browsers-problem-patterns-type-or-platform.mdx +++ b/src/content/docs/browser/new-relic-browser/browser-pro-features/browsers-problem-patterns-type-or-platform.mdx @@ -37,7 +37,7 @@ In New Relic , the **Browsers**< This page includes: * Top browsers by throughput (page views per minute or `ppm`) and page load time -* Core Web Vitals data, including largest contentful paint (LCP), first input delay (FID), cumulative layout shift score (CSL), and more +* Core web vitals data, including largest contentful paint (LCP), interaction to next paint (INP), cumulative layout shift score (CSL), and more * Top JavaScript errors by error, with a link to more details in the [JS errors UI](/docs/browser/new-relic-browser/browser-pro-features/javascript-errors-page-detect-analyze-errors/) * Filterable data by device type (mobile, tablet, desktop) diff --git a/src/content/docs/browser/new-relic-browser/browser-pro-features/geography-webpage-metrics-location.mdx b/src/content/docs/browser/new-relic-browser/browser-pro-features/geography-webpage-metrics-location.mdx index 4916f4cb4e2..d45c0cc481b 100644 --- a/src/content/docs/browser/new-relic-browser/browser-pro-features/geography-webpage-metrics-location.mdx +++ 
b/src/content/docs/browser/new-relic-browser/browser-pro-features/geography-webpage-metrics-location.mdx @@ -47,7 +47,7 @@ Depending on the type of performance or usage data you select, the list next to To view or sort your browser monitoring performance data by location: 1. Go to **[one.newrelic.com > All capabilities](https://one.newrelic.com/all-capabilities) > Browser > ** (select an app) **> Geography**. -2. Use the dropdown to select the type of performance or usage data you want to view, such as page load or unique session count, average time for various processes, largest contentful paint, first input delay, cumulative layout shift, etc. +2. Use the dropdown to select the type of performance or usage data you want to view, such as page load or unique session count, average time for various processes, largest contentful paint, interaction to next paint, cumulative layout shift, etc. 3. To drill down to a specific area, mouse over or select any area on the geographical map, or select any of the locations on the list. 4. To view specific performance data, [filter any attributes](#filter) below the funnel icon. 5. To compare page load and network performance with historical data, review the charts below the map on the **Map view** and **Key attribute charts** tabs. diff --git a/src/content/docs/browser/new-relic-browser/configuration/proxy-agent-requests.mdx b/src/content/docs/browser/new-relic-browser/configuration/proxy-agent-requests.mdx index 4c1b25e6994..e3d23fd7db5 100644 --- a/src/content/docs/browser/new-relic-browser/configuration/proxy-agent-requests.mdx +++ b/src/content/docs/browser/new-relic-browser/configuration/proxy-agent-requests.mdx @@ -11,9 +11,9 @@ freshnessValidatedDate: 2023-09-22 This feature is currently available for those using the copy/paste or NPM browser installation methods. There is currently no UI or NerdGraph configuration options available. We are continuing to work on improving access to these and other configuration options.
-Setting a proxy URL for the browser agent can be valuable because it can help you to get around ad blockers and security VPNs that block client requests to third-party domains. +Setting a proxy URL for the browser agent can be a valuable way to ensure that observability data is still collected for performance purposes. -Ad blockers and security VPNs often block third-party domains because they can be used to track users across the web. By proxying your requests through a first-party subdomain, you can bypass these blockers and ensure that your observability data is still being collected. +When you use the proxy method, it is important to ensure that you have the right to do so based on any contractual, regulatory, or other legal obligations you may have to your end users and/or site visitors. If you have verified that this method is a viable and compliant solution for your organization, you can look at the setup instructions below. ## How it works [#how-it-works] diff --git a/src/content/docs/browser/new-relic-browser/getting-started/browser-apps-index.mdx b/src/content/docs/browser/new-relic-browser/getting-started/browser-apps-index.mdx index 314d57d8e40..619ac7142fe 100644 --- a/src/content/docs/browser/new-relic-browser/getting-started/browser-apps-index.mdx +++ b/src/content/docs/browser/new-relic-browser/getting-started/browser-apps-index.mdx @@ -23,8 +23,8 @@ The browser index page provides a list of your existing apps being monitored by ## Browser apps index features [#index] browser_index.png @@ -125,8 +125,8 @@ Here are some tips for using the browser applications index. From any page in browser, select **Browser applications**, located above the name of your application. 
browser_return_index.png diff --git a/src/content/docs/browser/new-relic-browser/lab/debug-slowness.mdx b/src/content/docs/browser/new-relic-browser/lab/debug-slowness.mdx index ebb8e9f5de7..e41cbde17e1 100644 --- a/src/content/docs/browser/new-relic-browser/lab/debug-slowness.mdx +++ b/src/content/docs/browser/new-relic-browser/lab/debug-slowness.mdx @@ -102,7 +102,7 @@ Here, you see all the data related to your browser application including -Notice the **Largest Contentful Paint (LCP)**. +Notice the **largest contentful paint (LCP)**. **Largest Contentful Paint (LCP)**
. src={LCP} /> -Largest Contentful Paint (LCP) represents how quickly the main content of a web page is loaded. Ideally, the content should not take more than a second or two to load. +Largest contentful paint (LCP) represents how quickly the main content of a web page is loaded. Ideally, the content should not take more than a second or two to load. Here, you see that your site is loading in more than 5 seconds. No wonder your users are complaining! But what's causing this delay? back end? diff --git a/src/content/docs/browser/new-relic-browser/lab/install-browser-agent.mdx b/src/content/docs/browser/new-relic-browser/lab/install-browser-agent.mdx index 6eeb0fe0bf2..656ff9500f4 100644 --- a/src/content/docs/browser/new-relic-browser/lab/install-browser-agent.mdx +++ b/src/content/docs/browser/new-relic-browser/lab/install-browser-agent.mdx @@ -45,8 +45,8 @@ Navigate to [New Relic](https://one.newrelic.com/), and sign in with your accoun On the right side of the upper navigation bar, click **Add data**. Ass Data diff --git a/src/content/docs/browser/new-relic-browser/page-load-timing-resources/pageviewtiming-async-or-dynamic-page-details.mdx b/src/content/docs/browser/new-relic-browser/page-load-timing-resources/pageviewtiming-async-or-dynamic-page-details.mdx index 74c7ed1aa69..5d57ed089c4 100644 --- a/src/content/docs/browser/new-relic-browser/page-load-timing-resources/pageviewtiming-async-or-dynamic-page-details.mdx +++ b/src/content/docs/browser/new-relic-browser/page-load-timing-resources/pageviewtiming-async-or-dynamic-page-details.mdx @@ -40,9 +40,9 @@ Note that the metrics that make up Core Web Vitals [evolve](https://web.dev/vita Core Web Vitals metrics include loading, interactivity, and visual stability. -* **[Largest Contentful Paint (LCP)](https://web.dev/lcp/)**: measures loading performance. To provide a good user experience, LCP should occur **within 2.5 seconds** of when the page first starts loading. 
-* **[First Input Delay (FID)](https://web.dev/fid/)**: measures interactivity. To provide a good user experience, pages should have a FID of **less than 100 milliseconds**. -* **[Cumulative Layout Shift (CLS)](https://web.dev/cls/)**: measures visual stability. To provide a good user experience, pages should maintain a CLS of **less than 0.1**. +* **[Largest contentful paint (LCP)](https://web.dev/lcp/)**: measures loading performance. To provide a good user experience, LCP should occur **within 2.5 seconds** of when the page first starts loading. +* **[Interaction to next paint (INP)](https://web.dev/inp/)**: measures latency of all user interactions with a page. To provide a good user experience, pages should have a INP of **less than 200 milliseconds**. +* **[Cumulative layout shift (CLS)](https://web.dev/cls/)**: measures visual stability. To provide a good user experience, pages should maintain a CLS of **less than 0.1**. For each of these metrics, to ensure you're hitting the recommended target for most of your users, a good threshold to measure is the **75th percentile** of page loads, segmented across mobile and desktop devices. @@ -94,25 +94,23 @@ The `BrowserInteraction` and `PageView` events end their reporting when they rec We also report the cumulative layout shift (CLS) score attribute with LCP. This attribute is reported as `cumulativeLayoutShift`. - Largest Contentful Paint is one of three metrics identified by Google as the [Core Web Vitals](https://web.dev/vitals/). LCP values up to 2.5 secs are considered "Good," between 2.5-4.0 secs are considered "Needs Improvement," and above 4.0 secs are considered "Poor." + Largest contentful paint is one of three metrics identified by Google as the [Core Web Vitals](https://web.dev/vitals/). LCP values up to 2.5 secs are considered "Good," between 2.5-4.0 secs are considered "Needs Improvement," and above 4.0 secs are considered "Poor." 
- `firstInteraction` and `firstInputDelay` + `firstInteraction` and `interactionToNextPaint` - With the addition of [`firstInteraction`](/attribute-dictionary/?event=PageViewTiming&attribute=firstInteraction) and [`firstInputDelay`](/attribute-dictionary/?event=PageViewTiming&attribute=firstInputDelay), you can quickly determine the ways that your users are interacting with that visual content. These metrics tell you not only when they interacted, but what type of interaction (mousedown, pointerdown, etc.) and how long it took for them to receive a response from your site. + With the addition of [`firstInteraction`](/attribute-dictionary/?event=PageViewTiming&attribute=firstInteraction) and [`interactionToNextPaint`](/attribute-dictionary/?event=PageViewTiming&attribute=interactionToNextPaint), you can quickly determine the ways that your users are interacting with that visual content. These metrics tell you not only when they interacted, but what type of interaction (mousedown, pointerdown, etc.) and how long it took for them to receive a response from your site. - The `firstInputDelay` metric lies in the middle of `FirstContentfulPaint` and Time to Interactive (TTI) metrics. It measures the time between when a first input can be made and when the browser's main thread is able to respond to any interactions. + The `interactionToNextPaint` metric lies in the middle of `FirstContentfulPaint` and Time to Interactive (TTI) metrics. It measures the time between when a first input can be made and when the browser's main thread is able to respond to any interactions. We also report the cumulative layout shift (CLS) score attribute at the moment of the user's first interaction. This attribute is reported as `cumulativeLayoutShift.` - First Input Delay is one of three metrics identified by Google as the [Core Web Vitals](https://web.dev/vitals/). 
FID values up to 100 ms are considered "Good," between 100-300 ms are considered "Needs Improvement," and above 300 ms are considered "Poor." - - For a more detailed explanation, see our [ release notes](/docs/release-notes/new-relic-browser-release-notes/browser-agent-release-notes/browser-agent-v1153). + INP is one of three metrics identified by Google as the [core web vitals](https://web.dev/vitals/). An INP score of 200 ms or less is considered "Good," between 200-500 ms is considered "Needs Improvement," and above 500 ms is considered "Poor." @@ -122,9 +120,9 @@ The `BrowserInteraction` and `PageView` events end their reporting when they rec - [Cumulative Layout Shift (CLS)](https://web.dev/cls/) is available with [agent v1177 or higher](/docs/release-notes/new-relic-browser-release-notes/browser-agent-release-notes/browser-agent-v1177). CLS is an important, user-centric metric for measuring [visual stability](https://web.dev/user-centric-performance-metrics/#types-of-metrics) because it helps quantify how often users experience unexpected layout shifts. A low CLS helps ensure that the page is [delightful](https://web.dev/user-centric-performance-metrics/#questions). This is one of three metrics identified by Google as the [Core Web Vitals](https://web.dev/vitals/). + [Cumulative layout shift (CLS)](https://web.dev/cls/) is available with [agent v1177 or higher](/docs/release-notes/new-relic-browser-release-notes/browser-agent-release-notes/browser-agent-v1177). CLS is an important, user-centric metric for measuring [visual stability](https://web.dev/user-centric-performance-metrics/#types-of-metrics) because it helps quantify how often users experience unexpected layout shifts. A low CLS helps ensure that the page is [delightful](https://web.dev/user-centric-performance-metrics/#questions). This is one of three metrics identified by Google as the [Core Web Vitals](https://web.dev/vitals/). 
- Cumulative Layout Shift is one of three metrics identified by Google as the [Core Web Vitals](https://web.dev/vitals/). CLS scores up to 0.1 are considered "Good," between 0.1-0.25 are considered "Needs Improvement," and above 0.25 are considered "Poor." + Cumulative layout shift is one of three metrics identified by Google as the [Core Web Vitals](https://web.dev/vitals/). CLS scores up to 0.1 are considered "Good," between 0.1-0.25 are considered "Needs Improvement," and above 0.25 are considered "Poor." @@ -134,7 +132,7 @@ The `BrowserInteraction` and `PageView` events end their reporting when they rec - [Interaction to Next Paint (INP)](https://web.dev/inp/) is available with [agent v1227 or higher](/docs/release-notes/new-relic-browser-release-notes/browser-agent-release-notes/browser-agent-v1227). INP is a newer metric for measuring [runtime responsiveness](https://web.dev/user-centric-performance-metrics/#types-of-metrics) and user-perceived performance. It measures the largest latency between user interactions and page response or repaints. This is an experimental but identified-as-significant metric added in [Web Vitals](https://github.com/GoogleChrome/web-vitals) v3. + [Interaction to next paint (INP)](https://web.dev/inp/) is available with [agent v1227 or higher](/docs/release-notes/new-relic-browser-release-notes/browser-agent-release-notes/browser-agent-v1227). INP is a newer metric for measuring [runtime responsiveness](https://web.dev/user-centric-performance-metrics/#types-of-metrics) and user-perceived performance. It measures the largest latency between user interactions and page response or repaints. This is an experimental but identified-as-significant metric added in [Web Vitals](https://github.com/GoogleChrome/web-vitals) v3. INP scores up to 200 ms are considered "Good," between 200-500 ms are considered "Needs Improvement," and above 500 ms are considered "Poor." 
@@ -257,7 +255,7 @@ These metrics are supported by the following browser versions. For unsupported b * Chrome 79 * Metric is elevated to stable; changes in metric definition will be reported in this log. * Chrome 77 - * Metric exposed via API: [Cumulative Layout Shift](https://web.dev/cls/) available via [Layout Instability API](https://github.com/WICG/layout-instability) + * Metric exposed via API: [Cumulative layout shift](https://web.dev/cls/) available via [Layout Instability API](https://github.com/WICG/layout-instability) @@ -285,24 +283,6 @@ These metrics are supported by the following browser versions. For unsupported b * Chrome 77 or higher for desktop and mobile - - - - `firstInteraction` - - `firstInputDelay` - - - - These metrics require the [`addEventListener` browser API](https://developer.mozilla.org/en-US/docs/Web/API/EventTarget/addEventListener). This API is available in all modern browsers, including: - - * Apple Safari - * Google Chrome - * Microsoft Internet Explorer (IE) versions 9 or higher - * Mozilla Firefox - - - `interactionToNextPaint` @@ -364,7 +344,7 @@ These metrics are supported by the following browser versions. For unsupported b ## CumulativeLayoutShift [#cumulative-layout-shift] -Cumulative Layout Shift (CLS) is a metric measuring the stability of the content on a webpage. For a complete description, see [web.dev/cls](https://web.dev/cls/). +Cumulative layout shift (CLS) is a metric measuring the stability of the content on a webpage. For a complete description, see [web.dev/cls](https://web.dev/cls/). ### How is CLS captured in New Relic @@ -410,10 +390,10 @@ Here are some sample queries for the event data to help you get started. 
id="percentile-transaction" title="Percentile by transaction and interaction" > - Show the 95th percentile of first input delay over a time series, faceted by transaction name and interaction type: + Show the 75th percentile of interaction to next paint over a time series, faceted by transaction name and interaction type: ``` - SELECT percentile(firstInputDelay, 95) as 'fid' FROM PageViewTiming WHERE timingName = 'firstInteraction' TIMESERIES 1 minute FACET browserTransactionName, interactionType SINCE 3 hours ago + SELECT percentile(interactionToNextPaint, 75) as 'INP' FROM PageViewTiming WHERE timingName = 'interactionToNextPaint' TIMESERIES 1 minute FACET browserTransactionName, interactionType SINCE 3 hours ago ``` @@ -421,10 +401,10 @@ Here are some sample queries for the event data to help you get started. id="histogram-delay" title="Histogram of delay timings" > - Show a histogram of first input delay timings faceted by first interaction time ranges: + Show the 75th percentile of interaction to next paint timings faceted by page URL: ``` - FROM PageViewTiming SELECT histogram(firstInputDelay, 1000, 10) SINCE 3 hours ago WHERE timingName = 'firstInteraction' FACET CASES (WHERE firstInteraction < 1, WHERE firstInteraction >= 1 AND firstInteraction < 5, WHERE firstInteraction >= 5) + FROM PageViewTiming SELECT percentile(interactionToNextPaint, 75) WHERE entityGuid = 'xyz' FACET `pageUrl` SINCE 24 hours ago ``` diff --git a/src/content/docs/codestream/observability/code-level-metrics.mdx b/src/content/docs/codestream/observability/code-level-metrics.mdx index 2e775b684ac..bb376e3f47b 100644 --- a/src/content/docs/codestream/observability/code-level-metrics.mdx +++ b/src/content/docs/codestream/observability/code-level-metrics.mdx @@ -2,7 +2,7 @@ title: Code-level metrics metaDescription: "See performance data at the method level." 
redirects: -freshnessValidatedDate: never +freshnessValidatedDate: 2024-03-14 --- import codestreamCodeLevelMetricsExample from 'images/codestream_screenshot-crop_code-level-metrics-example.webp' @@ -29,12 +29,24 @@ Click the CodeLens to see charts visualizing each of the metrics. If the reposit Along with a chart of the error rate you’ll also see a list of the actual errors happening in the same timeframe, including the number of occurrences for each. If the error rate is spiking, and you see one particular error is causing the problem, you can click on it to view the stack trace and [start collaborating](/docs/codestream/observability/error-investigation) on resolution. -## Requirements [#requirements] +## Coverage [#coverage] -To see in-editor performance data, your service must meet the requirements listed below, and the service should have collected data in the last 30 minutes. The New Relic agent attempts to automatically collect data for functions on classes that are tied to HTTP requests. In many cases, and with many frameworks that use an MVC framework, these are often methods on a Controller class. +The New Relic agent attempts to automatically collect data for functions on classes that are tied to HTTP requests. In many cases, and with many frameworks that use an MVC framework, these are often methods on a Controller class. Because the New Relic agent doesn't collect every request all the time, it’s possible that low-traffic methods won't see any data. If data is missing for a specific method that you wish to see results for, you can use custom instrumentation to fill any gaps. 
See guidance for [Java](/docs/apm/agents/java-agent/custom-instrumentation/java-custom-instrumentation), [.NET](/docs/apm/agents/net-agent/custom-instrumentation/introduction-net-custom-instrumentation), [PHP](/docs/apm/agents/php-agent/features/php-custom-instrumentation), [Python](/docs/apm/agents/python-agent/custom-instrumentation/python-custom-instrumentation), [Ruby](/docs/apm/agents/ruby-agent/api-guides/ruby-custom-instrumentation), [Go](/docs/apm/agents/go-agent/instrumentation/instrument-go-transactions) and [Node.js](/docs/apm/agents/nodejs-agent/extend-your-instrumentation/nodejs-custom-instrumentation). +Run the following NRQL query to identify where you might see CodeLenses with code-level metrics for one of your services. Just replace the example `entity.guid` value with the one for the given service. In the query results, look at the `code.*` attributes to see if any of them represent your code, and not framework code. + +``` +SELECT * +FROM Span +WHERE entity.guid = 'MXxBUE18QVBQTElDQVRJT058MjM' AND code.function is not null +SINCE 30 minutes ago LIMIT MAX +``` +## Requirements [#requirements] + +To see in-editor performance data, your service must meet the requirements listed below, and the service should have collected data in the last 30 minutes. + * [Distributed tracing](/docs/distributed-tracing/enable-configure/language-agents-enable-distributed-tracing) must be enabled. Distributed tracing is on by default in recent versions of all agents, so you only need to worry about this if you've disabled distributed tracing. * **Go:** Requires Go agent version 3.24.0 or higher. If you're using VS Code, you must also have the [Go for VS Code](https://marketplace.visualstudio.com/items?itemName=golang.go) extension installed with the language server enabled. * **Java:** Requires Java agent version 7.11.0 or higher. 
Code-level metrics are only available for projects implemented with a [supported Java framework](/docs/apm/agents/java-agent/getting-started/compatibility-requirements-java-agent/#frameworks). If you're using VS Code, you must also have the [Language Support for Java by Red Hat](https://marketplace.visualstudio.com/items?itemName=redhat.java) extension installed. Note that Kotlin support is only available in IntelliJ. diff --git a/src/content/docs/codestream/start-here/what-is-codestream.mdx b/src/content/docs/codestream/start-here/what-is-codestream.mdx index e356aa3b081..933ed77db3f 100644 --- a/src/content/docs/codestream/start-here/what-is-codestream.mdx +++ b/src/content/docs/codestream/start-here/what-is-codestream.mdx @@ -4,8 +4,6 @@ description: "New Relic CodeStream makes production telemetry accessible where d redirects: - /docs/codestream - /docs/codestream/start-here/codestream-new-relic -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. freshnessValidatedDate: never --- diff --git a/src/content/docs/data-apis/custom-data/aws-privatelink.mdx b/src/content/docs/data-apis/custom-data/aws-privatelink.mdx index 9b28d8b413f..95f3098c594 100644 --- a/src/content/docs/data-apis/custom-data/aws-privatelink.mdx +++ b/src/content/docs/data-apis/custom-data/aws-privatelink.mdx @@ -211,11 +211,23 @@ These are the New Relic endpoint services available via AWS PrivateLink: `com.amazonaws.vpce.us-east-2.vpce-svc-0bf91fb637cf37b4f` + + + + Synthetics job manager + + + `synthetics-horde.nr-data.net` + + + `com.amazonaws.vpce.us-east-2.vpce-svc-09230bb8d16a9171e` + + - Review the following constraints when configuring the `identity-api.newrelic.com` and `infrastructure-command-api.newrelic.com` hostnames: + Review the following constraints when configuring the `identity-api.newrelic.com`, `infrastructure-command-api.newrelic.com` or `synthetics-horde.nr-data.net` hostnames: - These are only exposed in the `us-east-2` (Ohio) region. 
- The endpoint service does not have an associated DNS private name. Create a PrivateLink connected to this service endpoint, and create the Private Hosted Zone (PHZ) for each hostname. @@ -335,6 +347,7 @@ These are the New Relic endpoint services available via AWS PrivateLink: `com.amazonaws.vpce.eu-central-1.vpce-svc-04308d96cf1012913` + diff --git a/src/content/docs/data-apis/custom-data/custom-events/collect-custom-attributes.mdx b/src/content/docs/data-apis/custom-data/custom-events/collect-custom-attributes.mdx index 283c09460ed..fb88048d799 100644 --- a/src/content/docs/data-apis/custom-data/custom-events/collect-custom-attributes.mdx +++ b/src/content/docs/data-apis/custom-data/custom-events/collect-custom-attributes.mdx @@ -96,6 +96,7 @@ To enable and use custom attributes for APM, follow the procedure for your Custom attribute collection is enabled by default in Java. You can collect custom attributes using XML and the Java agent APIs. These two methods can be used in conjunction with each other. + Note that collecting custom attributes requires that the [New Relic Java API jar](/docs/apm/agents/java-agent/api-guides/guide-using-java-agent-api) be in the application's classpath. diff --git a/src/content/docs/data-apis/ingest-apis/event-api/introduction-event-api.mdx b/src/content/docs/data-apis/ingest-apis/event-api/introduction-event-api.mdx index dbc26b5a44a..7e5044dd466 100644 --- a/src/content/docs/data-apis/ingest-apis/event-api/introduction-event-api.mdx +++ b/src/content/docs/data-apis/ingest-apis/event-api/introduction-event-api.mdx @@ -35,8 +35,6 @@ redirects: - /docs/insights/insights-api - /docs/insights/insights-api/insert-data - /docs/data-apis/ingest-apis/introduction-event-api -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. 
freshnessValidatedDate: never --- diff --git a/src/content/docs/data-apis/manage-data/manage-data-coming-new-relic.mdx b/src/content/docs/data-apis/manage-data/manage-data-coming-new-relic.mdx index fa077bee7ed..46932521e01 100644 --- a/src/content/docs/data-apis/manage-data/manage-data-coming-new-relic.mdx +++ b/src/content/docs/data-apis/manage-data/manage-data-coming-new-relic.mdx @@ -93,7 +93,6 @@ Options for adjusting data include: * Configure the sampling rate for transaction events. See agent configurations for [Java](/docs/agents/java-agent/configuration/java-agent-configuration-config-file#Transaction_Events), [.Net](/docs/apm/agents/net-agent/configuration/net-agent-configuration), [Go](/docs/apm/agents/go-agent/configuration/go-agent-configuration#transaction-events-settings), [NodeJS](/docs/apm/agents/nodejs-agent/installation-configuration/nodejs-agent-configuration), [PHP](/docs/apm/agents/php-agent/configuration/php-agent-configuration), [Python](/docs/apm/agents/python-agent/configuration/python-agent-configuration), or [Ruby](/docs/apm/agents/ruby-agent/configuration/ruby-agent-configuration). * Adjust [distributed trace sampling](/docs/distributed-tracing/concepts/how-new-relic-distributed-tracing-works) -* [Set appropriate Apdex scores](/docs/apm/new-relic-apm/apdex/change-your-apdex-settings/), for example, for frequency of traces. * Optimize [custom instrumentation](/docs/apm/agents/manage-apm-agents/agent-data/custom-instrumentation) and/or [custom metrics](/docs/apm/agents/manage-apm-agents/agent-data/collect-custom-metrics). * Manage [logs-in-context](/docs/logs/logs-context/get-started-logs-context). 
diff --git a/src/content/docs/data-apis/understand-data/new-relic-data-types.mdx b/src/content/docs/data-apis/understand-data/new-relic-data-types.mdx index 78aa11833d0..a0849eacf4a 100644 --- a/src/content/docs/data-apis/understand-data/new-relic-data-types.mdx +++ b/src/content/docs/data-apis/understand-data/new-relic-data-types.mdx @@ -25,8 +25,6 @@ redirects: - /docs/insights/event-data-sources/default-events - /docs/insights/event-data-sources/custom-events - /docs/insights/event-data-sources/insights-api -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. freshnessValidatedDate: never --- diff --git a/src/content/docs/distributed-tracing/concepts/introduction-distributed-tracing.mdx b/src/content/docs/distributed-tracing/concepts/introduction-distributed-tracing.mdx index 41abcfe3927..25508bdb36e 100644 --- a/src/content/docs/distributed-tracing/concepts/introduction-distributed-tracing.mdx +++ b/src/content/docs/distributed-tracing/concepts/introduction-distributed-tracing.mdx @@ -22,8 +22,6 @@ redirects: - /docs/understand-dependencies/distributed-tracing/get-started/introduction-distributed-tracing - /docs/distributed-tracing/get-started/introduction-distributed-tracing - /docs/distributed-tracing -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. 
freshnessValidatedDate: never --- diff --git a/src/content/docs/errors-inbox/errors-inbox.mdx b/src/content/docs/errors-inbox/errors-inbox.mdx index 97a5ab1b27e..60babccb800 100644 --- a/src/content/docs/errors-inbox/errors-inbox.mdx +++ b/src/content/docs/errors-inbox/errors-inbox.mdx @@ -22,6 +22,7 @@ redirects: freshnessValidatedDate: never --- + import errorsinboxScoptedOtel from 'images/errors-inbox_screenshot-full_scopted-otel.webp' import errorsinboxErrorsInboxUi from 'images/errors-inbox_screenshot-full_errors-inbox-ui.webp' @@ -42,7 +43,7 @@ import errorsinboxListofTraces from 'images/errors-inbox_screenshot-crop_list-of import errorsinboxTraceforError from 'images/errors-inbox_screenshot-crop_trace-for-error.webp' - +import errorsinboxResolveInVersion from 'images/errors-inbox_screenshot-full_resolve-in-version.webp' ## Unified error tracking experience [#matters] @@ -193,11 +194,34 @@ Errors inbox enables you to triage error groups directly from the main screen or ### Errors status [#status] -You can set one of three statuses, and filter your inbox by status. +When you triage your inbox, you can choose from a variety of statuses. You may choose to resolve errors immediately or even mark them so they are ignored. In other cases, you may want to resolve issues in the next version or in a specific version. + + + The options **Resolve in next version** and **Resolve in specific version** are only supported if your team uses semantic versioning. + + +Screenshot showing resolve in version +
+ In the errors inbox UI, you can indicate when you want to resolve errors. +
-* `Unresolved`: This is the default status of error groups. -* `Resolved`: Setting an error as resolved will hide it from the default inbox view unless filters are updated to include resolved errors. If events matching the error group fingerprint occur after marking an error group as resolved, it will automatically reset the status to `Unresolved`. This can be useful for identifying regressions. -* `Ignored`: This will hide the error group from the inbox view unless filters are updated to include ignored errors, or until you stop ignoring the error group. +You can set one of the following statuses and filter your inbox by status: + +* **Unresolved**: This is the default status of error groups. +* **Resolve in next version**: We recommend that you use this status if you expect to resolve this error group in your next release. In order to enable this option, you need to set up [change tracking](/docs/change-tracking/change-tracking-introduction/#start-tracking) for your application so that errors inbox can detect a new release and check that the error group was indeed resolved. In the event the error group is still detected in the next version or any future versions, the error group will be auto-unresolved, marked with a regression, and you'll receive a Slack notification about the regression. +* **Resolve in specific version**: Choose this option to resolve error groups in these situations: + * If you know the error group will be resolved in a specific version + * If you know the error group is resolved in an existing version + * If you'd like to enter a specific version + + In order to enable this option, you need to set up [version tracking](/docs/errors-inbox/version-tracking/) for your application or service. If an error occurrence with an equivalent or higher semantic version is detected, the error group will be auto-unresolved, marked with a regression, and you'll receive a Slack notification about the regression. 
+* **Resolve**: Setting an error group as resolved will hide it from the default inbox view unless filters are updated to include resolved error groups. If events matching the error group fingerprint occur after marking an error group as resolved, it will automatically reset the status to **Unresolved**. This can be useful for identifying regressions. +* **Ignore**: This will hide the error group from the inbox view unless filters are updated to include ignored errors, or until you stop ignoring the error group. ### Expected errors [#expected] diff --git a/src/content/docs/infrastructure/amazon-integrations/connect/aws-metric-stream.mdx b/src/content/docs/infrastructure/amazon-integrations/connect/aws-metric-stream.mdx index 56eec3f2cad..806cd1511af 100644 --- a/src/content/docs/infrastructure/amazon-integrations/connect/aws-metric-stream.mdx +++ b/src/content/docs/infrastructure/amazon-integrations/connect/aws-metric-stream.mdx @@ -157,7 +157,6 @@ The AWS CloudWatch Metric Streams integration focuses on CloudWatch metrics. As Polling integrations based on service APIs: -* AWS Billing * AWS CloudTrail * AWS Health * AWS Trusted Advisor diff --git a/src/content/docs/infrastructure/amazon-integrations/connect/eks-add-on.mdx b/src/content/docs/infrastructure/amazon-integrations/connect/eks-add-on.mdx index 2c4bb11e096..b6b1bb2d0aa 100644 --- a/src/content/docs/infrastructure/amazon-integrations/connect/eks-add-on.mdx +++ b/src/content/docs/infrastructure/amazon-integrations/connect/eks-add-on.mdx @@ -19,7 +19,7 @@ Amazon Elastic Kubernetes Service (Amazon EKS) is a managed Kubernetes service t ## Prerequisites [#prereq] -* An [Amazon Elastic Kubernetes Service (EKS)](https://docs.aws.amazon.com/eks/latest/userguide/what-is-eks.html) cluster. See the user guide [for more information](https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html). 
+* An [Amazon Elastic Kubernetes Service (EKS)](https://docs.aws.amazon.com/eks/latest/userguide/what-is-eks.html) cluster. For more information, see our [Kubernetes integration: compatibility and requirements](/docs/kubernetes-pixie/kubernetes-integration/get-started/kubernetes-integration-compatibility-requirements/) and [Amazon's user guide](https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html). * A valid New Relic account. You can subscribe to New Relic for free [from the AWS marketplace](https://aws.amazon.com/marketplace/pp/prodview-ov56chowabeb4?sr=0-3&ref_=beagle&applicationId=AWS-Marketplace-Console). * Subscribe to the [New Relic Kubernetes integration](https://aws.amazon.com/marketplace/pp/prodview-gcywa6keq2ajy?applicationId=AWS-Marketplace-Console&ref_=beagle&sr=0-5), also available for free. * Login to [New Relic](https://one.newrelic.com/) and generate a [license key](https://one.newrelic.com/api-keys). diff --git a/src/content/docs/infrastructure/amazon-integrations/get-started/aws-integrations-metrics.mdx b/src/content/docs/infrastructure/amazon-integrations/get-started/aws-integrations-metrics.mdx index 029c5972813..bec54895be2 100644 --- a/src/content/docs/infrastructure/amazon-integrations/get-started/aws-integrations-metrics.mdx +++ b/src/content/docs/infrastructure/amazon-integrations/get-started/aws-integrations-metrics.mdx @@ -67,6 +67,3541 @@ For historical reasons, we suffix some metrics that are part of pairs where one We no longer add these mappings, and we're committed to preserving all the new metrics with their original AWS name, as defined by the general rule above. The following is a complete list of all the metrics that are suffixed with `.by + dimensionName`: +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Original metric name + + Original dimension name + + Suffixed metric name +
+ `aws.apigateway.4XXError` + + `Api` + + `aws.apigateway.4XXError.byApi` +
+ `aws.apigateway.4XXError` + + `ResourceWithMetrics` + + `aws.apigateway.4XXError.byResourceWithMetrics` +
+ `aws.apigateway.4XXError` + + `Stage` + + `aws.apigateway.4XXError.byStage` +
+ `aws.apigateway.5XXError` + + `Api` + + `aws.apigateway.5XXError.byApi` +
+ `aws.apigateway.5XXError` + + `ResourceWithMetrics` + + `aws.apigateway.5XXError.byResourceWithMetrics` +
+ `aws.apigateway.5XXError` + + `Stage` + + `aws.apigateway.5XXError.byStage` +
+ `aws.apigateway.CacheHitCount` + + `Api` + + `aws.apigateway.CacheHitCount.byApi` +
+ `aws.apigateway.CacheHitCount` + + `ResourceWithMetrics` + + `aws.apigateway.CacheHitCount.byResourceWithMetrics` +
+ `aws.apigateway.CacheHitCount` + + `Stage` + + `aws.apigateway.CacheHitCount.byStage` +
+ `aws.apigateway.CacheMissCount` + `Api` + + `aws.apigateway.CacheMissCount.byApi` +
+ `aws.apigateway.CacheMissCount` + + `ResourceWithMetrics` + + `aws.apigateway.CacheMissCount.byResourceWithMetrics` +
+ `aws.apigateway.CacheMissCount` + + `Stage` + + `aws.apigateway.CacheMissCount.byStage` +
+ `aws.apigateway.Count` + + `Api` + + `aws.apigateway.Count.byApi` +
+ `aws.apigateway.Count` + + `ResourceWithMetrics` + + `aws.apigateway.Count.byResourceWithMetrics` +
+ `aws.apigateway.Count` + + `Stage` + + `aws.apigateway.Count.byStage` +
+ `aws.apigateway.IntegrationLatency` + + `Api` + + `aws.apigateway.IntegrationLatency.byApi` +
+ `aws.apigateway.IntegrationLatency` + + `ResourceWithMetrics` + + `aws.apigateway.IntegrationLatency.byResourceWithMetrics` +
+ `aws.apigateway.IntegrationLatency` + + `Stage` + + `aws.apigateway.IntegrationLatency.byStage` +
+ `aws.apigateway.Latency` + + `Api` + + `aws.apigateway.Latency.byApi` +
+ `aws.apigateway.Latency` + + `ResourceWithMetrics` + + `aws.apigateway.Latency.byResourceWithMetrics` +
+ `aws.apigateway.Latency` + + `Stage` + + `aws.apigateway.Latency.byStage` +
+ `aws.applicationelb.RequestCount` + + `Alb` + + `aws.applicationelb.RequestCount.byAlb` +
+ `aws.applicationelb.RequestCount` + + `TargetGroup` + + `aws.applicationelb.RequestCount.byTargetGroup` +
+ `aws.docdb.BackupRetentionPeriodStorageUsed` + + `Cluster` + + `aws.docdb.BackupRetentionPeriodStorageUsed.byCluster` +
+ `aws.docdb.BackupRetentionPeriodStorageUsed` + + `ClusterByRole` + + `aws.docdb.BackupRetentionPeriodStorageUsed.byClusterByRole` +
+ `aws.docdb.BackupRetentionPeriodStorageUsed` + + `Instance` + + `aws.docdb.BackupRetentionPeriodStorageUsed.byInstance` +
+ `aws.docdb.BufferCacheHitRatio` + + `Cluster` + + `aws.docdb.BufferCacheHitRatio.byCluster` +
+ `aws.docdb.BufferCacheHitRatio` + + `ClusterByRole` + + `aws.docdb.BufferCacheHitRatio.byClusterByRole` +
+ `aws.docdb.BufferCacheHitRatio` + + `Instance` + + `aws.docdb.BufferCacheHitRatio.byInstance` +
+ `aws.docdb.CPUUtilization` + + `Cluster` + + `aws.docdb.CPUUtilization.byCluster` +
+ `aws.docdb.CPUUtilization` + + `ClusterByRole` + + `aws.docdb.CPUUtilization.byClusterByRole` +
+ `aws.docdb.CPUUtilization` + + `Instance` + + `aws.docdb.CPUUtilization.byInstance` +
+ `aws.docdb.DatabaseConnections` + + `Cluster` + + `aws.docdb.DatabaseConnections.byCluster` +
+ `aws.docdb.DatabaseConnections` + + `ClusterByRole` + + `aws.docdb.DatabaseConnections.byClusterByRole` +
+ `aws.docdb.DatabaseConnections` + + `Instance` + + `aws.docdb.DatabaseConnections.byInstance` +
+ `aws.docdb.DBClusterReplicaLagMaximum` + + `Cluster` + + `aws.docdb.DBClusterReplicaLagMaximum.byCluster` +
+ `aws.docdb.DBClusterReplicaLagMaximum` + + `ClusterByRole` + + `aws.docdb.DBClusterReplicaLagMaximum.byClusterByRole` +
+ `aws.docdb.DBClusterReplicaLagMaximum` + + `Instance` + + `aws.docdb.DBClusterReplicaLagMaximum.byInstance` +
+ `aws.docdb.DBClusterReplicaLagMinimum` + + `Cluster` + + `aws.docdb.DBClusterReplicaLagMinimum.byCluster` +
+ `aws.docdb.DBClusterReplicaLagMinimum` + + `ClusterByRole` + + `aws.docdb.DBClusterReplicaLagMinimum.byClusterByRole` +
+ `aws.docdb.DBClusterReplicaLagMinimum` + + `Instance` + + `aws.docdb.DBClusterReplicaLagMinimum.byInstance` +
+ `aws.docdb.DBInstanceReplicaLag` + + `Cluster` + + `aws.docdb.DBInstanceReplicaLag.byCluster` +
+ `aws.docdb.DBInstanceReplicaLag` + + `ClusterByRole` + + `aws.docdb.DBInstanceReplicaLag.byClusterByRole` +
+ `aws.docdb.DBInstanceReplicaLag` + + `Instance` + + `aws.docdb.DBInstanceReplicaLag.byInstance` +
+ `aws.docdb.DiskQueueDepth` + + `Cluster` + + `aws.docdb.DiskQueueDepth.byCluster` +
+ `aws.docdb.DiskQueueDepth` + + `ClusterByRole` + + `aws.docdb.DiskQueueDepth.byClusterByRole` +
+ `aws.docdb.DiskQueueDepth` + + `Instance` + + `aws.docdb.DiskQueueDepth.byInstance` +
+ `aws.docdb.EngineUptime` + + `Cluster` + + `aws.docdb.EngineUptime.byCluster` +
+ `aws.docdb.EngineUptime` + + `ClusterByRole` + + `aws.docdb.EngineUptime.byClusterByRole` +
+ `aws.docdb.EngineUptime` + + `Instance` + + `aws.docdb.EngineUptime.byInstance` +
+ `aws.docdb.FreeableMemory` + + `Cluster` + + `aws.docdb.FreeableMemory.byCluster` +
+ `aws.docdb.FreeableMemory` + + `ClusterByRole` + + `aws.docdb.FreeableMemory.byClusterByRole` +
+ `aws.docdb.FreeableMemory` + + `Instance` + + `aws.docdb.FreeableMemory.byInstance` +
+ `aws.docdb.FreeLocalStorage` + + `Cluster` + + `aws.docdb.FreeLocalStorage.byCluster` +
+ `aws.docdb.FreeLocalStorage` + + `ClusterByRole` + + `aws.docdb.FreeLocalStorage.byClusterByRole` +
+ `aws.docdb.FreeLocalStorage` + + `Instance` + + `aws.docdb.FreeLocalStorage.byInstance` +
+ `aws.docdb.NetworkReceiveThroughput` + + `Cluster` + + `aws.docdb.NetworkReceiveThroughput.byCluster` +
+ `aws.docdb.NetworkReceiveThroughput` + + `ClusterByRole` + + `aws.docdb.NetworkReceiveThroughput.byClusterByRole` +
+ `aws.docdb.NetworkReceiveThroughput` + + `Instance` + + `aws.docdb.NetworkReceiveThroughput.byInstance` +
+ `aws.docdb.NetworkThroughput` + + `Cluster` + + `aws.docdb.NetworkThroughput.byCluster` +
+ `aws.docdb.NetworkThroughput` + + `ClusterByRole` + + `aws.docdb.NetworkThroughput.byClusterByRole` +
+ `aws.docdb.NetworkThroughput` + + `Instance` + + `aws.docdb.NetworkThroughput.byInstance` +
+ `aws.docdb.NetworkTransmitThroughput` + + `Cluster` + + `aws.docdb.NetworkTransmitThroughput.byCluster` +
+ `aws.docdb.NetworkTransmitThroughput` + + `ClusterByRole` + + `aws.docdb.NetworkTransmitThroughput.byClusterByRole` +
+ `aws.docdb.NetworkTransmitThroughput` + + `Instance` + + `aws.docdb.NetworkTransmitThroughput.byInstance` +
+ `aws.docdb.ReadIOPS` + + `Cluster` + + `aws.docdb.ReadIOPS.byCluster` +
+ `aws.docdb.ReadIOPS` + + `ClusterByRole` + + `aws.docdb.ReadIOPS.byClusterByRole` +
+ `aws.docdb.ReadIOPS` + + `Instance` + + `aws.docdb.ReadIOPS.byInstance` +
+ `aws.docdb.ReadLatency` + + `Cluster` + + `aws.docdb.ReadLatency.byCluster` +
+ `aws.docdb.ReadLatency` + + `ClusterByRole` + + `aws.docdb.ReadLatency.byClusterByRole`
+ `aws.docdb.ReadLatency` + + `Instance` + + `aws.docdb.ReadLatency.byInstance` +
+ `aws.docdb.ReadThroughput` + + `Cluster` + + `aws.docdb.ReadThroughput.byCluster` +
+ `aws.docdb.ReadThroughput` + + `ClusterByRole` + + `aws.docdb.ReadThroughput.byClusterByRole` +
+ `aws.docdb.ReadThroughput` + + `Instance` + + `aws.docdb.ReadThroughput.byInstance` +
+ `aws.docdb.SnapshotStorageUsed` + + `Cluster` + + `aws.docdb.SnapshotStorageUsed.byCluster` +
+ `aws.docdb.SnapshotStorageUsed` + + `ClusterByRole` + + `aws.docdb.SnapshotStorageUsed.byClusterByRole` +
+ `aws.docdb.SnapshotStorageUsed` + + `Instance` + + `aws.docdb.SnapshotStorageUsed.byInstance` +
+ `aws.docdb.SwapUsage` + + `Cluster` + + `aws.docdb.SwapUsage.byCluster` +
+ `aws.docdb.SwapUsage` + + `ClusterByRole` + + `aws.docdb.SwapUsage.byClusterByRole` +
+ `aws.docdb.SwapUsage` + + `Instance` + + `aws.docdb.SwapUsage.byInstance` +
+ `aws.docdb.TotalBackupStorageBilled` + + `Cluster` + + `aws.docdb.TotalBackupStorageBilled.byCluster` +
+ `aws.docdb.TotalBackupStorageBilled` + + `ClusterByRole` + + `aws.docdb.TotalBackupStorageBilled.byClusterByRole` +
+ `aws.docdb.TotalBackupStorageBilled` + + `Instance` + + `aws.docdb.TotalBackupStorageBilled.byInstance` +
+ `aws.docdb.VolumeBytesUsed` + + `Cluster` + + `aws.docdb.VolumeBytesUsed.byCluster` +
+ `aws.docdb.VolumeBytesUsed` + + `ClusterByRole` + + `aws.docdb.VolumeBytesUsed.byClusterByRole` +
+ `aws.docdb.VolumeBytesUsed` + + `Instance` + + `aws.docdb.VolumeBytesUsed.byInstance` +
+ `aws.docdb.VolumeReadIOPs` + + `Cluster` + + `aws.docdb.VolumeReadIOPs.byCluster` +
+ `aws.docdb.VolumeReadIOPs` + + `ClusterByRole` + + `aws.docdb.VolumeReadIOPs.byClusterByRole` +
+ `aws.docdb.VolumeReadIOPs` + + `Instance` + + `aws.docdb.VolumeReadIOPs.byInstance` +
+ `aws.docdb.VolumeWriteIOPs` + + `Cluster` + + `aws.docdb.VolumeWriteIOPs.byCluster` +
+ `aws.docdb.VolumeWriteIOPs` + + `ClusterByRole` + + `aws.docdb.VolumeWriteIOPs.byClusterByRole` +
+ `aws.docdb.VolumeWriteIOPs` + + `Instance` + + `aws.docdb.VolumeWriteIOPs.byInstance` +
+ `aws.docdb.WriteIOPS` + + `Cluster` + + `aws.docdb.WriteIOPS.byCluster` +
+ `aws.docdb.WriteIOPS` + + `ClusterByRole` + + `aws.docdb.WriteIOPS.byClusterByRole` +
+ `aws.docdb.WriteIOPS` + + `Instance` + + `aws.docdb.WriteIOPS.byInstance` +
+ `aws.docdb.WriteLatency` + + `Cluster` + + `aws.docdb.WriteLatency.byCluster` +
+ `aws.docdb.WriteLatency` + + `ClusterByRole` + + `aws.docdb.WriteLatency.byClusterByRole` +
+ `aws.docdb.WriteLatency` + + `Instance` + + `aws.docdb.WriteLatency.byInstance` +
+ `aws.docdb.WriteThroughput` + + `Cluster` + + `aws.docdb.WriteThroughput.byCluster` +
+ `aws.docdb.WriteThroughput` + + `ClusterByRole` + + `aws.docdb.WriteThroughput.byClusterByRole` +
+ `aws.docdb.WriteThroughput` + + `Instance` + + `aws.docdb.WriteThroughput.byInstance` +
+ `aws.dynamodb.ConsumedReadCapacityUnits` + + `GlobalSecondaryIndex` + + `aws.dynamodb.ConsumedReadCapacityUnits.byGlobalSecondaryIndex` +
+ `aws.dynamodb.ConsumedWriteCapacityUnits` + + `GlobalSecondaryIndex` + + `aws.dynamodb.ConsumedWriteCapacityUnits.byGlobalSecondaryIndex` +
+ `aws.dynamodb.ProvisionedReadCapacityUnits` + + `GlobalSecondaryIndex` + + `aws.dynamodb.ProvisionedReadCapacityUnits.byGlobalSecondaryIndex` +
+ `aws.dynamodb.ProvisionedWriteCapacityUnits` + + `GlobalSecondaryIndex` + + `aws.dynamodb.ProvisionedWriteCapacityUnits.byGlobalSecondaryIndex` +
+ `aws.dynamodb.ReadThrottleEvents` + + `GlobalSecondaryIndex` + + `aws.dynamodb.ReadThrottleEvents.byGlobalSecondaryIndex` +
+ `aws.dynamodb.WriteThrottleEvents` + + `GlobalSecondaryIndex` + + `aws.dynamodb.WriteThrottleEvents.byGlobalSecondaryIndex` +
+ `aws.ecs.activeServicesCount` + + `Cluster` + + `aws.ecs.activeServicesCount.byCluster` +
+ `aws.ecs.CPUUtilization` + + `Cluster` + + `aws.ecs.CPUUtilization.byCluster` +
+ `aws.ecs.CPUUtilization` + + `Service` + + `aws.ecs.CPUUtilization.byService` +
+ `aws.ecs.desiredCount` + + `Service` + + `aws.ecs.desiredCount.byService` +
+ `aws.ecs.MemoryUtilization` + + `Cluster` + + `aws.ecs.MemoryUtilization.byCluster` +
+ `aws.ecs.MemoryUtilization` + + `Service` + + `aws.ecs.MemoryUtilization.byService` +
+ `aws.ecs.pendingCount` + + `Service` + + `aws.ecs.pendingCount.byService` +
+ `aws.ecs.pendingTasksCount` + + `Cluster` + + `aws.ecs.pendingTasksCount.byCluster` +
+ `aws.ecs.registeredContainerInstancesCount` + + `Cluster` + + `aws.ecs.registeredContainerInstancesCount.byCluster` +
+ `aws.ecs.runningCount` + + `Service` + + `aws.ecs.runningCount.byService` +
+ `aws.ecs.runningTasksCount` + + `Cluster` + + `aws.ecs.runningTasksCount.byCluster` +
+ `aws.es.CPUUtilization` + + `Cluster` + + `aws.es.CPUUtilization.byCluster` +
+ `aws.es.CPUUtilization` + + `Node` + + `aws.es.CPUUtilization.byNode` +
+ `aws.es.FreeStorageSpace` + + `Cluster` + + `aws.es.FreeStorageSpace.byCluster` +
+ `aws.es.FreeStorageSpace` + + `Node` + + `aws.es.FreeStorageSpace.byNode` +
+ `aws.es.IndexingLatency` + + `Cluster` + + `aws.es.IndexingLatency.byCluster` +
+ `aws.es.IndexingLatency` + + `Node` + + `aws.es.IndexingLatency.byNode` +
+ `aws.es.IndexingRate` + + `Cluster` + + `aws.es.IndexingRate.byCluster` +
+ `aws.es.IndexingRate` + + `Node` + + `aws.es.IndexingRate.byNode` +
+ `aws.es.JVMGCOldCollectionCount` + + `Cluster` + + `aws.es.JVMGCOldCollectionCount.byCluster` +
+ `aws.es.JVMGCOldCollectionCount` + + `Node` + + `aws.es.JVMGCOldCollectionCount.byNode` +
+ `aws.es.JVMGCOldCollectionTime` + + `Cluster` + + `aws.es.JVMGCOldCollectionTime.byCluster` +
+ `aws.es.JVMGCOldCollectionTime` + + `Node` + + `aws.es.JVMGCOldCollectionTime.byNode` +
+ `aws.es.JVMGCYoungCollectionCount` + + `Cluster` + + `aws.es.JVMGCYoungCollectionCount.byCluster` +
+ `aws.es.JVMGCYoungCollectionCount` + + `Node` + + `aws.es.JVMGCYoungCollectionCount.byNode` +
+ `aws.es.JVMGCYoungCollectionTime` + + `Cluster` + + `aws.es.JVMGCYoungCollectionTime.byCluster` +
+ `aws.es.JVMGCYoungCollectionTime` + + `Node` + + `aws.es.JVMGCYoungCollectionTime.byNode` +
+ `aws.es.JVMMemoryPressure` + + `Cluster` + + `aws.es.JVMMemoryPressure.byCluster` +
+ `aws.es.JVMMemoryPressure` + + `Node` + + `aws.es.JVMMemoryPressure.byNode` +
+ `aws.es.SearchLatency` + + `Cluster` + + `aws.es.SearchLatency.byCluster` +
+ `aws.es.SearchLatency` + + `Node` + + `aws.es.SearchLatency.byNode` +
+ `aws.es.SearchRate` + + `Cluster` + + `aws.es.SearchRate.byCluster` +
+ `aws.es.SearchRate` + + `Node` + + `aws.es.SearchRate.byNode` +
+ `aws.es.SysMemoryUtilization` + + `Cluster` + + `aws.es.SysMemoryUtilization.byCluster` +
+ `aws.es.SysMemoryUtilization` + + `Node` + + `aws.es.SysMemoryUtilization.byNode` +
+ `aws.es.ThreadpoolBulkQueue` + + `Cluster` + + `aws.es.ThreadpoolBulkQueue.byCluster` +
+ `aws.es.ThreadpoolBulkQueue` + + `Node` + + `aws.es.ThreadpoolBulkQueue.byNode` +
+ `aws.es.ThreadpoolBulkRejected` + + `Cluster` + + `aws.es.ThreadpoolBulkRejected.byCluster` +
+ `aws.es.ThreadpoolBulkRejected` + + `Node` + + `aws.es.ThreadpoolBulkRejected.byNode` +
+ `aws.es.ThreadpoolBulkThreads` + + `Cluster` + + `aws.es.ThreadpoolBulkThreads.byCluster` +
+ `aws.es.ThreadpoolBulkThreads` + + `Node` + + `aws.es.ThreadpoolBulkThreads.byNode` +
+ `aws.es.ThreadpoolForce_mergeQueue` + + `Cluster` + + `aws.es.ThreadpoolForce_mergeQueue.byCluster` +
+ `aws.es.ThreadpoolForce_mergeQueue` + + `Node` + + `aws.es.ThreadpoolForce_mergeQueue.byNode` +
+ `aws.es.ThreadpoolForce_mergeRejected` + + `Cluster` + + `aws.es.ThreadpoolForce_mergeRejected.byCluster` +
+ `aws.es.ThreadpoolForce_mergeRejected` + + `Node` + + `aws.es.ThreadpoolForce_mergeRejected.byNode` +
+ `aws.es.ThreadpoolForce_mergeThreads` + + `Cluster` + + `aws.es.ThreadpoolForce_mergeThreads.byCluster` +
+ `aws.es.ThreadpoolForce_mergeThreads` + + `Node` + + `aws.es.ThreadpoolForce_mergeThreads.byNode` +
+ `aws.es.ThreadpoolIndexQueue` + + `Cluster` + + `aws.es.ThreadpoolIndexQueue.byCluster` +
+ `aws.es.ThreadpoolIndexQueue` + + `Node` + + `aws.es.ThreadpoolIndexQueue.byNode` +
+ `aws.es.ThreadpoolIndexRejected` + + `Cluster` + + `aws.es.ThreadpoolIndexRejected.byCluster` +
+ `aws.es.ThreadpoolIndexRejected` + + `Node` + + `aws.es.ThreadpoolIndexRejected.byNode` +
+ `aws.es.ThreadpoolIndexThreads` + + `Cluster` + + `aws.es.ThreadpoolIndexThreads.byCluster` +
+ `aws.es.ThreadpoolIndexThreads` + + `Node` + + `aws.es.ThreadpoolIndexThreads.byNode` +
+ `aws.es.ThreadpoolSearchQueue` + + `Cluster` + + `aws.es.ThreadpoolSearchQueue.byCluster` +
+ `aws.es.ThreadpoolSearchQueue` + + `Node` + + `aws.es.ThreadpoolSearchQueue.byNode` +
+ `aws.es.ThreadpoolSearchRejected` + + `Cluster` + + `aws.es.ThreadpoolSearchRejected.byCluster` +
+ `aws.es.ThreadpoolSearchRejected` + + `Node` + + `aws.es.ThreadpoolSearchRejected.byNode` +
+ `aws.es.ThreadpoolSearchThreads` + + `Cluster` + + `aws.es.ThreadpoolSearchThreads.byCluster` +
+ `aws.es.ThreadpoolSearchThreads` + + `Node` + + `aws.es.ThreadpoolSearchThreads.byNode` +
+ `aws.kafka.BytesInPerSec` + + `Broker` + + `aws.kafka.BytesInPerSec.byBroker` +
+ `aws.kafka.BytesInPerSec` + + `Topic` + + `aws.kafka.BytesInPerSec.byTopic` +
+ `aws.kafka.BytesOutPerSec` + + `Broker` + + `aws.kafka.BytesOutPerSec.byBroker` +
+ `aws.kafka.BytesOutPerSec` + + `Topic` + + `aws.kafka.BytesOutPerSec.byTopic` +
+ `aws.kafka.FetchMessageConversionsPerSec` + + `Broker` + + `aws.kafka.FetchMessageConversionsPerSec.byBroker` +
+ `aws.kafka.FetchMessageConversionsPerSec` + + `Topic` + + `aws.kafka.FetchMessageConversionsPerSec.byTopic` +
+ `aws.kafka.MessagesInPerSec` + + `Broker` + + `aws.kafka.MessagesInPerSec.byBroker` +
+ `aws.kafka.MessagesInPerSec` + + `Topic` + + `aws.kafka.MessagesInPerSec.byTopic` +
+ `aws.kafka.ProduceMessageConversionsPerSec` + + `Broker` + + `aws.kafka.ProduceMessageConversionsPerSec.byBroker` +
+ `aws.kafka.ProduceMessageConversionsPerSec` + + `Topic` + + `aws.kafka.ProduceMessageConversionsPerSec.byTopic` +
+ `aws.kinesis.IncomingBytes` + + `Stream` + + `aws.kinesis.IncomingBytes.byStream` +
+ `aws.kinesis.IncomingBytes` + + `StreamShard` + + `aws.kinesis.IncomingBytes.byStreamShard` +
+ `aws.kinesis.IncomingRecords` + + `Stream` + + `aws.kinesis.IncomingRecords.byStream` +
+ `aws.kinesis.IncomingRecords` + + `StreamShard` + + `aws.kinesis.IncomingRecords.byStreamShard` +
+ `aws.kinesis.ReadProvisionedThroughputExceeded` + + `Stream` + + `aws.kinesis.ReadProvisionedThroughputExceeded.byStream` +
+ `aws.kinesis.ReadProvisionedThroughputExceeded` + + `StreamShard` + + `aws.kinesis.ReadProvisionedThroughputExceeded.byStreamShard` +
+ `aws.kinesis.WriteProvisionedThroughputExceeded` + + `Stream` + + `aws.kinesis.WriteProvisionedThroughputExceeded.byStream` +
+ `aws.kinesis.WriteProvisionedThroughputExceeded` + + `StreamShard` + + `aws.kinesis.WriteProvisionedThroughputExceeded.byStreamShard` +
+ `aws.lambda.ConcurrentExecutions` + + `Function` + + `aws.lambda.ConcurrentExecutions.byFunction` +
+ `aws.lambda.ConcurrentExecutions` + + `Region` + + `aws.lambda.ConcurrentExecutions.byRegion` +
+ `aws.lambda.DeadLetterErrors` + + `Function` + + `aws.lambda.DeadLetterErrors.byFunction` +
+ `aws.lambda.DeadLetterErrors` + + `FunctionAlias` + + `aws.lambda.DeadLetterErrors.byFunctionAlias` +
+ `aws.lambda.Duration` + + `Function` + + `aws.lambda.Duration.byFunction` +
+ `aws.lambda.Duration` + + `FunctionAlias` + + `aws.lambda.Duration.byFunctionAlias` +
+ `aws.lambda.Errors` + + `Function` + + `aws.lambda.Errors.byFunction` +
+ `aws.lambda.Errors` + + `FunctionAlias` + + `aws.lambda.Errors.byFunctionAlias` +
+ `aws.lambda.Invocations` + + `Function` + + `aws.lambda.Invocations.byFunction` +
+ `aws.lambda.Invocations` + + `FunctionAlias` + + `aws.lambda.Invocations.byFunctionAlias` +
+ `aws.lambda.IteratorAge` + + `Function` + + `aws.lambda.IteratorAge.byFunction` +
+ `aws.lambda.IteratorAge` + + `FunctionAlias` + + `aws.lambda.IteratorAge.byFunctionAlias` +
+ `aws.lambda.ProvisionedConcurrencyInvocations` + + `Function` + + `aws.lambda.ProvisionedConcurrencyInvocations.byFunction` +
+ `aws.lambda.ProvisionedConcurrencyInvocations` + + `FunctionAlias` + + `aws.lambda.ProvisionedConcurrencyInvocations.byFunctionAlias` +
+ `aws.lambda.ProvisionedConcurrencySpilloverInvocations` + + `Function` + + `aws.lambda.ProvisionedConcurrencySpilloverInvocations.byFunction` +
+ `aws.lambda.ProvisionedConcurrencySpilloverInvocations` + + `FunctionAlias` + + `aws.lambda.ProvisionedConcurrencySpilloverInvocations.byFunctionAlias` +
+ `aws.lambda.ProvisionedConcurrencyUtilization` + + `FunctionAlias` + + `aws.lambda.ProvisionedConcurrencyUtilization.byFunctionAlias` +
+ `aws.lambda.ProvisionedConcurrentExecutions` + + `Function` + + `aws.lambda.ProvisionedConcurrentExecutions.byFunction` +
+ `aws.lambda.ProvisionedConcurrentExecutions` + + `FunctionAlias` + + `aws.lambda.ProvisionedConcurrentExecutions.byFunctionAlias` +
+ `aws.lambda.Throttles` + + `Function` + + `aws.lambda.Throttles.byFunction` +
+ `aws.lambda.Throttles` + + `FunctionAlias` + + `aws.lambda.Throttles.byFunctionAlias` +
+ `aws.neptune.BackupRetentionPeriodStorageUsed` + + `Cluster` + + `aws.neptune.BackupRetentionPeriodStorageUsed.byCluster` +
+ `aws.neptune.BackupRetentionPeriodStorageUsed` + + `ClusterByRole` + + `aws.neptune.BackupRetentionPeriodStorageUsed.byClusterByRole` +
+ `aws.neptune.BackupRetentionPeriodStorageUsed` + + `Instance` + + `aws.neptune.BackupRetentionPeriodStorageUsed.byInstance` +
+ `aws.neptune.ClusterReplicaLag` + + `Cluster` + + `aws.neptune.ClusterReplicaLag.byCluster` +
+ `aws.neptune.ClusterReplicaLag` + + `ClusterByRole` + + `aws.neptune.ClusterReplicaLag.byClusterByRole` +
+ `aws.neptune.ClusterReplicaLag` + + `Instance` + + `aws.neptune.ClusterReplicaLag.byInstance` +
+ `aws.neptune.ClusterReplicaLagMaximum` + + `Cluster` + + `aws.neptune.ClusterReplicaLagMaximum.byCluster` +
+ `aws.neptune.ClusterReplicaLagMaximum` + + `ClusterByRole` + + `aws.neptune.ClusterReplicaLagMaximum.byClusterByRole` +
+ `aws.neptune.ClusterReplicaLagMaximum` + + `Instance` + + `aws.neptune.ClusterReplicaLagMaximum.byInstance` +
+ `aws.neptune.ClusterReplicaLagMinimum` + + `Cluster` + + `aws.neptune.ClusterReplicaLagMinimum.byCluster` +
+ `aws.neptune.ClusterReplicaLagMinimum` + + `ClusterByRole` + + `aws.neptune.ClusterReplicaLagMinimum.byClusterByRole` +
+ `aws.neptune.ClusterReplicaLagMinimum` + + `Instance` + + `aws.neptune.ClusterReplicaLagMinimum.byInstance` +
+ `aws.neptune.CPUUtilization` + + `Cluster` + + `aws.neptune.CPUUtilization.byCluster` +
+ `aws.neptune.CPUUtilization` + + `ClusterByRole` + + `aws.neptune.CPUUtilization.byClusterByRole` +
+ `aws.neptune.CPUUtilization` + + `Instance` + + `aws.neptune.CPUUtilization.byInstance` +
+ `aws.neptune.EngineUptime` + + `Cluster` + + `aws.neptune.EngineUptime.byCluster` +
+ `aws.neptune.EngineUptime` + + `ClusterByRole` + + `aws.neptune.EngineUptime.byClusterByRole` +
+ `aws.neptune.EngineUptime` + + `Instance` + + `aws.neptune.EngineUptime.byInstance` +
+ `aws.neptune.FreeableMemory` + + `Cluster` + + `aws.neptune.FreeableMemory.byCluster` +
+ `aws.neptune.FreeableMemory` + + `ClusterByRole` + + `aws.neptune.FreeableMemory.byClusterByRole` +
+ `aws.neptune.FreeableMemory` + + `Instance` + + `aws.neptune.FreeableMemory.byInstance` +
+ `aws.neptune.GremlinRequestsPerSec` + + `Cluster` + + `aws.neptune.GremlinRequestsPerSec.byCluster` +
+ `aws.neptune.GremlinRequestsPerSec` + + `ClusterByRole` + + `aws.neptune.GremlinRequestsPerSec.byClusterByRole` +
+ `aws.neptune.GremlinRequestsPerSec` + + `Instance` + + `aws.neptune.GremlinRequestsPerSec.byInstance` +
+ `aws.neptune.GremlinWebSocketOpenConnections` + + `Cluster` + + `aws.neptune.GremlinWebSocketOpenConnections.byCluster` +
+ `aws.neptune.GremlinWebSocketOpenConnections` + + `ClusterByRole` + + `aws.neptune.GremlinWebSocketOpenConnections.byClusterByRole` +
+ `aws.neptune.GremlinWebSocketOpenConnections` + + `Instance` + + `aws.neptune.GremlinWebSocketOpenConnections.byInstance` +
+ `aws.neptune.LoaderRequestsPerSec` + + `Cluster` + + `aws.neptune.LoaderRequestsPerSec.byCluster` +
+ `aws.neptune.LoaderRequestsPerSec` + + `ClusterByRole` + + `aws.neptune.LoaderRequestsPerSec.byClusterByRole` +
+ `aws.neptune.LoaderRequestsPerSec` + + `Instance` + + `aws.neptune.LoaderRequestsPerSec.byInstance` +
+ `aws.neptune.MainRequestQueuePendingRequests` + + `Cluster` + + `aws.neptune.MainRequestQueuePendingRequests.byCluster` +
+ `aws.neptune.MainRequestQueuePendingRequests` + + `ClusterByRole` + + `aws.neptune.MainRequestQueuePendingRequests.byClusterByRole` +
+ `aws.neptune.MainRequestQueuePendingRequests` + + `Instance` + + `aws.neptune.MainRequestQueuePendingRequests.byInstance` +
+ `aws.neptune.NetworkReceiveThroughput` + + `Cluster` + + `aws.neptune.NetworkReceiveThroughput.byCluster` +
+ `aws.neptune.NetworkReceiveThroughput` + + `ClusterByRole` + + `aws.neptune.NetworkReceiveThroughput.byClusterByRole` +
+ `aws.neptune.NetworkReceiveThroughput` + + `Instance` + + `aws.neptune.NetworkReceiveThroughput.byInstance` +
+ `aws.neptune.NetworkThroughput` + + `Cluster` + + `aws.neptune.NetworkThroughput.byCluster` +
+ `aws.neptune.NetworkThroughput` + + `ClusterByRole` + + `aws.neptune.NetworkThroughput.byClusterByRole` +
+ `aws.neptune.NetworkThroughput` + + `Instance` + + `aws.neptune.NetworkThroughput.byInstance` +
+ `aws.neptune.NetworkTransmitThroughput` + + `Cluster` + + `aws.neptune.NetworkTransmitThroughput.byCluster` +
+ `aws.neptune.NetworkTransmitThroughput` + + `ClusterByRole` + + `aws.neptune.NetworkTransmitThroughput.byClusterByRole` +
+ `aws.neptune.NetworkTransmitThroughput` + + `Instance` + + `aws.neptune.NetworkTransmitThroughput.byInstance` +
+ `aws.neptune.NumTxCommitted` + + `Cluster` + + `aws.neptune.NumTxCommitted.byCluster` +
+ `aws.neptune.NumTxCommitted` + + `ClusterByRole` + + `aws.neptune.NumTxCommitted.byClusterByRole` +
+ `aws.neptune.NumTxCommitted` + + `Instance` + + `aws.neptune.NumTxCommitted.byInstance` +
+ `aws.neptune.NumTxOpened` + + `Cluster` + + `aws.neptune.NumTxOpened.byCluster` +
+ `aws.neptune.NumTxOpened` + + `ClusterByRole` + + `aws.neptune.NumTxOpened.byClusterByRole` +
+ `aws.neptune.NumTxOpened` + + `Instance` + + `aws.neptune.NumTxOpened.byInstance` +
+ `aws.neptune.NumTxRolledBack` + + `Cluster` + + `aws.neptune.NumTxRolledBack.byCluster` +
+ `aws.neptune.NumTxRolledBack` + + `ClusterByRole` + + `aws.neptune.NumTxRolledBack.byClusterByRole` +
+ `aws.neptune.NumTxRolledBack` + + `Instance` + + `aws.neptune.NumTxRolledBack.byInstance` +
+ `aws.neptune.SnapshotStorageUsed` + + `Cluster` + + `aws.neptune.SnapshotStorageUsed.byCluster` +
+ `aws.neptune.SnapshotStorageUsed` + + `ClusterByRole` + + `aws.neptune.SnapshotStorageUsed.byClusterByRole` +
+ `aws.neptune.SnapshotStorageUsed` + + `Instance` + + `aws.neptune.SnapshotStorageUsed.byInstance` +
+ `aws.neptune.SparqlRequestsPerSec` + + `Cluster` + + `aws.neptune.SparqlRequestsPerSec.byCluster` +
+ `aws.neptune.SparqlRequestsPerSec` + + `ClusterByRole` + + `aws.neptune.SparqlRequestsPerSec.byClusterByRole` +
+ `aws.neptune.SparqlRequestsPerSec` + + `Instance` + + `aws.neptune.SparqlRequestsPerSec.byInstance` +
+ `aws.neptune.TotalBackupStorageBilled` + + `Cluster` + + `aws.neptune.TotalBackupStorageBilled.byCluster` +
+ `aws.neptune.TotalBackupStorageBilled` + + `ClusterByRole` + + `aws.neptune.TotalBackupStorageBilled.byClusterByRole` +
+ `aws.neptune.TotalBackupStorageBilled` + + `Instance` + + `aws.neptune.TotalBackupStorageBilled.byInstance` +
+ `aws.neptune.TotalClientErrorsPerSec` + + `Cluster` + + `aws.neptune.TotalClientErrorsPerSec.byCluster` +
+ `aws.neptune.TotalClientErrorsPerSec` + + `ClusterByRole` + + `aws.neptune.TotalClientErrorsPerSec.byClusterByRole` +
+ `aws.neptune.TotalClientErrorsPerSec` + + `Instance` + + `aws.neptune.TotalClientErrorsPerSec.byInstance` +
+ `aws.neptune.TotalRequestsPerSec` + + `Cluster` + + `aws.neptune.TotalRequestsPerSec.byCluster` +
+ `aws.neptune.TotalRequestsPerSec` + + `ClusterByRole` + + `aws.neptune.TotalRequestsPerSec.byClusterByRole` +
+ `aws.neptune.TotalRequestsPerSec` + + `Instance` + + `aws.neptune.TotalRequestsPerSec.byInstance` +
+ `aws.neptune.TotalServerErrorsPerSec` + + `Cluster` + + `aws.neptune.TotalServerErrorsPerSec.byCluster` +
+ `aws.neptune.TotalServerErrorsPerSec` + + `ClusterByRole` + + `aws.neptune.TotalServerErrorsPerSec.byClusterByRole` +
+ `aws.neptune.TotalServerErrorsPerSec` + + `Instance` + + `aws.neptune.TotalServerErrorsPerSec.byInstance` +
+ `aws.neptune.VolumeBytesUsed` + + `Cluster` + + `aws.neptune.VolumeBytesUsed.byCluster` +
+ `aws.neptune.VolumeBytesUsed` + + `ClusterByRole` + + `aws.neptune.VolumeBytesUsed.byClusterByRole` +
+ `aws.neptune.VolumeBytesUsed` + + `Instance` + + `aws.neptune.VolumeBytesUsed.byInstance` +
+ `aws.neptune.VolumeReadIOPs` + + `Cluster` + + `aws.neptune.VolumeReadIOPs.byCluster` +
+ `aws.neptune.VolumeReadIOPs` + + `ClusterByRole` + + `aws.neptune.VolumeReadIOPs.byClusterByRole` +
+ `aws.neptune.VolumeReadIOPs` + + `Instance` + + `aws.neptune.VolumeReadIOPs.byInstance` +
+ `aws.neptune.VolumeWriteIOPs` + + `Cluster` + + `aws.neptune.VolumeWriteIOPs.byCluster` +
+ `aws.neptune.VolumeWriteIOPs` + + `ClusterByRole` + + `aws.neptune.VolumeWriteIOPs.byClusterByRole` +
+ `aws.neptune.VolumeWriteIOPs` + + `Instance` + + `aws.neptune.VolumeWriteIOPs.byInstance` +
+ `aws.rds.VolumeBytesUsed` + + `DbCluster` + + `aws.rds.VolumeBytesUsed.byDbCluster` +
+ `aws.rds.VolumeReadIOPs` + + `DbCluster` + + `aws.rds.VolumeReadIOPs.byDbCluster` +
+ `aws.rds.VolumeWriteIOPs` + + `DbCluster` + + `aws.rds.VolumeWriteIOPs.byDbCluster` +
+ `aws.redshift.CPUUtilization` + + `Cluster` + + `aws.redshift.CPUUtilization.byCluster` +
+ `aws.redshift.CPUUtilization` + + `Node` + + `aws.redshift.CPUUtilization.byNode` +
+ `aws.redshift.DatabaseConnections` + + `Cluster` + + `aws.redshift.DatabaseConnections.byCluster` +
+ `aws.redshift.DatabaseConnections` + + `Node` + + `aws.redshift.DatabaseConnections.byNode` +
+ `aws.redshift.HealthStatus` + + `Cluster` + + `aws.redshift.HealthStatus.byCluster` +
+ `aws.redshift.HealthStatus` + + `Node` + + `aws.redshift.HealthStatus.byNode` +
+ `aws.redshift.MaintenanceMode` + + `Cluster` + + `aws.redshift.MaintenanceMode.byCluster` +
+ `aws.redshift.MaintenanceMode` + + `Node` + + `aws.redshift.MaintenanceMode.byNode` +
+ `aws.redshift.NetworkReceiveThroughput` + + `Cluster` + + `aws.redshift.NetworkReceiveThroughput.byCluster` +
+ `aws.redshift.NetworkReceiveThroughput` + + `Node` + + `aws.redshift.NetworkReceiveThroughput.byNode` +
+ `aws.redshift.NetworkTransmitThroughput` + + `Cluster` + + `aws.redshift.NetworkTransmitThroughput.byCluster` +
+ `aws.redshift.NetworkTransmitThroughput` + + `Node` + + `aws.redshift.NetworkTransmitThroughput.byNode` +
+ `aws.redshift.PercentageDiskSpaceUsed` + + `Cluster` + + `aws.redshift.PercentageDiskSpaceUsed.byCluster` +
+ `aws.redshift.PercentageDiskSpaceUsed` + + `Node` + + `aws.redshift.PercentageDiskSpaceUsed.byNode` +
+ `aws.redshift.ReadIOPS` + + `Cluster` + + `aws.redshift.ReadIOPS.byCluster` +
+ `aws.redshift.ReadIOPS` + + `Node` + + `aws.redshift.ReadIOPS.byNode` +
+ `aws.redshift.ReadLatency` + + `Cluster` + + `aws.redshift.ReadLatency.byCluster` +
+ `aws.redshift.ReadLatency` + + `Node` + + `aws.redshift.ReadLatency.byNode` +
+ `aws.redshift.ReadThroughput` + + `Cluster` + + `aws.redshift.ReadThroughput.byCluster` +
+ `aws.redshift.ReadThroughput` + + `Node` + + `aws.redshift.ReadThroughput.byNode` +
+ `aws.redshift.WriteIOPS` + + `Cluster` + + `aws.redshift.WriteIOPS.byCluster` +
+ `aws.redshift.WriteIOPS` + + `Node` + + `aws.redshift.WriteIOPS.byNode` +
+ `aws.redshift.WriteLatency` + + `Cluster` + + `aws.redshift.WriteLatency.byCluster` +
+ `aws.redshift.WriteLatency` + + `Node` + + `aws.redshift.WriteLatency.byNode` +
+ `aws.redshift.WriteThroughput` + + `Cluster` + + `aws.redshift.WriteThroughput.byCluster` +
+ `aws.redshift.WriteThroughput` + + `Node` + + `aws.redshift.WriteThroughput.byNode` +
+ `aws.states.ConsumedCapacity` + + `ApiUsage` + + `aws.states.ConsumedCapacity.byApiUsage` +
+ `aws.states.ConsumedCapacity` + + `Service` + + `aws.states.ConsumedCapacity.byService` +
+ `aws.states.ProvisionedBucketSize` + + `ApiUsage` + + `aws.states.ProvisionedBucketSize.byApiUsage` +
+ `aws.states.ProvisionedBucketSize` + + `Service` + + `aws.states.ProvisionedBucketSize.byService` +
+ `aws.states.ProvisionedRefillRate` + + `ApiUsage` + + `aws.states.ProvisionedRefillRate.byApiUsage` +
+ `aws.states.ProvisionedRefillRate` + + `Service` + + `aws.states.ProvisionedRefillRate.byService` +
+ `aws.states.ThrottledEvents` + + `ApiUsage` + + `aws.states.ThrottledEvents.byApiUsage` +
+ `aws.states.ThrottledEvents` + + `Service` + + `aws.states.ThrottledEvents.byService` +
+ ## API Polling metrics [#aws-metrics-table] For a reference on available metrics from each one of the polling integrations and their names, [check out our doc on the individual integrations](/docs/infrastructure/amazon-integrations/get-started/introduction-aws-integrations/). diff --git a/src/content/docs/infrastructure/google-cloud-platform-integrations/get-started/connect-google-cloud-platform-services-new-relic.mdx b/src/content/docs/infrastructure/google-cloud-platform-integrations/get-started/connect-google-cloud-platform-services-new-relic.mdx index c90d71a7071..8f6b38a614e 100644 --- a/src/content/docs/infrastructure/google-cloud-platform-integrations/get-started/connect-google-cloud-platform-services-new-relic.mdx +++ b/src/content/docs/infrastructure/google-cloud-platform-integrations/get-started/connect-google-cloud-platform-services-new-relic.mdx @@ -132,6 +132,10 @@ Integrating your GCP project with New Relic requires you to authorize New Relic ## Connect GCP to New Relic infrastructure monitoring [#connect] + + If this is your first time setting up GCP with New Relic, you need to make sure that you've enabled Cloud Monitoring API for your project. You can do this in the [Google Cloud console](https://console.cloud.google.com/apis/api/monitoring.googleapis.com/). + + To connect your Google account to New Relic with user account authorization: 1. Go to **[one.newrelic.com > All capabilities](https://one.newrelic.com/all-capabilities) > Infrastructure > GCP**. On the **Google Cloud Services** integrations page, select **Add a GCP account**. 
diff --git a/src/content/docs/infrastructure/google-cloud-platform-integrations/get-started/integrations-custom-roles.mdx b/src/content/docs/infrastructure/google-cloud-platform-integrations/get-started/integrations-custom-roles.mdx index 6337fd6890d..cd201b6bc91 100644 --- a/src/content/docs/infrastructure/google-cloud-platform-integrations/get-started/integrations-custom-roles.mdx +++ b/src/content/docs/infrastructure/google-cloud-platform-integrations/get-started/integrations-custom-roles.mdx @@ -94,7 +94,7 @@ To customize your role you need to: - `cloudfunctions.locations.list` + * `cloudfunctions.functions.list` diff --git a/src/content/docs/infrastructure/host-integrations/host-integrations-list/envoy-integration.mdx b/src/content/docs/infrastructure/host-integrations/host-integrations-list/envoy-integration.mdx new file mode 100644 index 00000000000..c5f8d2b5cc9 --- /dev/null +++ b/src/content/docs/infrastructure/host-integrations/host-integrations-list/envoy-integration.mdx @@ -0,0 +1,152 @@ +--- +title: Envoy integration +tags: + - New Relic integrations + - Envoy integration +metaDescription: Use New Relic infrastructure agent to get a dashboard with metrics from your Envoy. +freshnessValidatedDate: 2024-03-21 +--- +import envoyDashboard from 'images/infrastructure_screenshot_full-envoy-dashboard.webp' + +Gain deep insights into Envoy's operations with seamless data integration into New Relic. Monitor key metrics to ensure the optimal performance of your Envoy backend clusters, listening sockets, HTTP routing, and cryptographic material. + +Envoy dashboard +
+After setting up our Envoy integration, we give you a dashboard for your Envoy metrics. +
+ + + + +## Install the infrastructure agent [#infra-install] + +To use the Envoy integration, you need to also [install the infrastructure agent](/docs/infrastructure/install-infrastructure-agent/get-started/install-infrastructure-agent-new-relic/) on the same host. The infrastructure agent monitors the host itself, while the integration you'll install in the next step extends your monitoring with Envoy-specific data. + + + +## Enable the Envoy integration with nri-prometheus + +To set up the Envoy integration, follow these steps: + +1. Create a file named `nri-prometheus-config.yml` in the integrations directory: + ```shell + touch /etc/newrelic-infra/integrations.d/nri-prometheus-config.yml + ``` +2. Add the following snippet to your `nri-prometheus-config.yml` file to enable the agent to capture Envoy data: + ```yml + integrations: + - name: nri-prometheus + config: + # When standalone is set to false nri-prometheus requires an infrastructure agent to work and send data. Defaults to true + standalone: false + + # When running with infrastructure agent emitters will have to include infra-sdk + emitters: infra-sdk + + # The name of your cluster. It's important to match other New Relic products to relate the data. + cluster_name: "YOUR_DESIRED_CLUSTER_NAME" + + targets: + - description: Envoy metrics list + urls: ["http://:9901/stats/prometheus"] + + # tls_config: + # ca_file_path: "/etc/etcd/etcd-client-ca.crt" + # cert_file_path: "/etc/etcd/etcd-client.crt" + # key_file_path: "/etc/etcd/etcd-client.key" + + # Whether the integration should run in verbose mode or not. Defaults to false + verbose: false + + # Whether the integration should run in audit mode or not. Defaults to false. + # Audit mode logs the uncompressed data sent to New Relic. Use this to log all data sent. + # It does not include verbose mode. This can lead to a high log volume, use with care + audit: false + + # The HTTP client timeout when fetching data from endpoints. Defaults to 30s. 
+ # scrape_timeout: "30s" + + # Length in time to distribute the scraping from the endpoints + scrape_duration: "5s" + + # Number of worker threads used for scraping targets. + # For large clusters with many (>400) endpoints, slowly increase until scrape + # time falls between the desired `scrape_duration`. + # Increasing this value too much will result in huge memory consumption if too + # many metrics are being scraped. + # Default: 4 + # worker_threads: 4 + + # Whether the integration should skip TLS verification or not. Defaults to false + insecure_skip_verify: true + timeout: 10s + ``` + + + +## Forward Envoy logs + +Follow these steps to forward Envoy logs to New Relic: + +1. Create a log file named `logging.yml` in the following path: + + ```shell + cd /etc/newrelic-infra/logging.d + ``` +2. Add the following script to the `logging.yml` file. Replace `file` with your Envoy log filepath if needed: + +```yml +logs: + - name: envoy.log + file: /tmp/envoy.log + attributes: + logtype: envoy_logs + - name: envoy-admin.log + file: /tmp/admin_access.log + attributes: + logtype: envoy_admin_logs +``` + + + +## Restart the New Relic infrastructure agent + +Restart your infrastructure agent. + +```shell +sudo systemctl restart newrelic-infra.service +``` + +In a couple of minutes, your application will send metrics to [one.newrelic.com](https://one.newrelic.com). + + +## Find your data + +You can choose our pre-built dashboard template named `Envoy` to monitor your Envoy application metrics. Follow these steps to use our pre-built dashboard template: + +1. From [one.newrelic.com](https://one.newrelic.com), go to the **+ Add data** page +2. Click on **Dashboards** +3. In the search bar, type `Envoy` +4. The Envoy dashboard should appear. Click on it to install it + +Your Envoy dashboard is considered a custom dashboard and can be found in the **Dashboards** UI. 
For docs on using and editing dashboards, see [our dashboard docs](/docs/query-your-data/explore-query-data/dashboards/introduction-dashboards). + +Here is a NRQL query to check the Envoy downstream total connections: + +```sql +SELECT latest(envoy_http_downstream_cx_total) as 'Downstream total connections' from Metric +``` + + + +## What's next? + +To learn more about building NRQL queries and generating dashboards, check out these docs: + +* [Introduction to the query builder](/docs/query-your-data/explore-query-data/query-builder/introduction-query-builder) to create basic and advanced queries. +* [Introduction to dashboards](/docs/query-your-data/explore-query-data/dashboards/introduction-dashboards) to customize your dashboard and carry out different actions. +* [Manage your dashboard](/docs/query-your-data/explore-query-data/dashboards/manage-your-dashboard) to adjust your dashboards display mode, or to add more content to your dashboard. diff --git a/src/content/docs/infrastructure/host-integrations/host-integrations-list/jmx-monitoring-install.mdx b/src/content/docs/infrastructure/host-integrations/host-integrations-list/jmx-monitoring-install.mdx index d4e66743836..b3c787ef523 100644 --- a/src/content/docs/infrastructure/host-integrations/host-integrations-list/jmx-monitoring-install.mdx +++ b/src/content/docs/infrastructure/host-integrations/host-integrations-list/jmx-monitoring-install.mdx @@ -21,7 +21,7 @@ Our JMX [integration](/docs/integrations/host-integrations/getting-started/intro Before installing the integration, make sure that you meet the following requirements: * A New Relic account. Don't have one? [Sign up for free!](https://newrelic.com/signup) No credit card required. -* Java versions 8 or higher. +* Java version 8 or higher. * If you need to use a different Java version than the one configured in `PATH`, follow [New Relic's configuration documentation on GitHub](https://github.com/newrelic/nrjmx#configuring-java-version). 
* This integration does not support the IIOP protocol. diff --git a/src/content/docs/infrastructure/host-integrations/host-integrations-list/mongodb/mongodb-monitoring-integration-new.mdx b/src/content/docs/infrastructure/host-integrations/host-integrations-list/mongodb/mongodb-monitoring-integration-new.mdx index eda95a8839d..a7ec2ba87a5 100644 --- a/src/content/docs/infrastructure/host-integrations/host-integrations-list/mongodb/mongodb-monitoring-integration-new.mdx +++ b/src/content/docs/infrastructure/host-integrations/host-integrations-list/mongodb/mongodb-monitoring-integration-new.mdx @@ -509,14 +509,15 @@ The following configuration options are available: id="basic-config" title="Basic configuration" > - This is the basic configuration used to collect all metrics. Add credentials to your mongodb UR if they are required. + This is the basic configuration used to collect all metrics. Add credentials to your mongodb URI if they are required. + NOTE: If your password has special characters in it, you will need to put single quotes around the URI ```yml integrations: - name: nri-mongodb3 config: mongodb_cluster_name: my_cluster - mongodb_uri: mongodb://username:password@localhost:27017 + mongodb_uri: 'mongodb://username:password@localhost:27017' exporter_port: 9126 ``` @@ -547,12 +548,12 @@ The following configuration options are available: - name: nri-mongodb3 config: mongodb_cluster_name: my_cluster - mongodb_uri: mongodb://username:password@cluster1:27017 + mongodb_uri: 'mongodb://username:password@cluster1:27017' exporter_port: 9126 - name: nri-mongodb3 config: mongodb_cluster_name: my_second_cluster - mongodb_uri: mongodb://username:password@cluster2:27017 + mongodb_uri: 'mongodb://username:password@cluster2:27017' exporter_port: 9127 ``` @@ -567,7 +568,7 @@ The following configuration options are available: - name: nri-mongodb3 config: mongodb_cluster_name: my_cluster - mongodb_uri: mongodb://username:password@localhost:27017 + mongodb_uri: 
'mongodb://username:password@localhost:27017' collection_filters: "db1,db2.collection2" exporter_port: 9126 ``` @@ -583,7 +584,7 @@ The following configuration options are available: - name: nri-mongodb3 config: mongodb_cluster_name: my_cluster - mongodb_uri: mongodb://username:password@localhost:27017/my_auth_source + mongodb_uri: 'mongodb://username:password@localhost:27017/my_auth_source' exporter_port: 9126 ``` @@ -613,7 +614,7 @@ The following configuration options are available: - name: nri-mongodb3 config: mongodb_cluster_name: my_cluster - mongodb_uri: mongodb+srv://username:password@my_atlas_url.mongodb.net/?retryWrites=true&w=majority + mongodb_uri: 'mongodb+srv://username:password@my_atlas_url.mongodb.net/?retryWrites=true&w=majority' mongodb_direct_connect: false exporter_port: 9126 ``` diff --git a/src/content/docs/infrastructure/host-integrations/host-integrations-list/nvidia-gpu-integration.mdx b/src/content/docs/infrastructure/host-integrations/host-integrations-list/nvidia-gpu-integration.mdx index c35c0804f09..4a2a04368e8 100644 --- a/src/content/docs/infrastructure/host-integrations/host-integrations-list/nvidia-gpu-integration.mdx +++ b/src/content/docs/infrastructure/host-integrations/host-integrations-list/nvidia-gpu-integration.mdx @@ -66,56 +66,56 @@ Follow these steps to configure Flex: ```yml --- - integrations: - - name: nri-flex - # interval: 30s - config: - name: NvidiaSMI - variable_store: - metrics: - "name,driver_version,count,serial,pci.bus_id,pci.domain,pci.bus,\ - pci.device_id,pci.sub_device_id,pcie.link.gen.current,pcie.link.gen.max,\ - pcie.link.width.current,pcie.link.width.max,index,display_mode,display_active,\ - persistence_mode,accounting.mode,accounting.buffer_size,driver_model.current,\ - driver_model.pending,vbios_version,inforom.img,inforom.oem,inforom.ecc,inforom.pwr,\ - gom.current,gom.pending,fan.speed,pstate,clocks_throttle_reasons.supported,\ - 
clocks_throttle_reasons.gpu_idle,clocks_throttle_reasons.applications_clocks_setting,\ - clocks_throttle_reasons.sw_power_cap,clocks_throttle_reasons.hw_slowdown,clocks_throttle_reasons.hw_thermal_slowdown,\ - clocks_throttle_reasons.hw_power_brake_slowdown,clocks_throttle_reasons.sw_thermal_slowdown,\ - clocks_throttle_reasons.sync_boost,memory.total,memory.used,memory.free,compute_mode,\ - utilization.gpu,utilization.memory,encoder.stats.sessionCount,encoder.stats.averageFps,\ - encoder.stats.averageLatency,ecc.mode.current,ecc.mode.pending,ecc.errors.corrected.volatile.device_memory,\ - ecc.errors.corrected.volatile.dram,ecc.errors.corrected.volatile.register_file,ecc.errors.corrected.volatile.l1_cache,\ - ecc.errors.corrected.volatile.l2_cache,ecc.errors.corrected.volatile.texture_memory,ecc.errors.corrected.volatile.cbu,\ - ecc.errors.corrected.volatile.sram,ecc.errors.corrected.volatile.total,ecc.errors.corrected.aggregate.device_memory,\ - ecc.errors.corrected.aggregate.dram,ecc.errors.corrected.aggregate.register_file,ecc.errors.corrected.aggregate.l1_cache,\ - ecc.errors.corrected.aggregate.l2_cache,ecc.errors.corrected.aggregate.texture_memory,ecc.errors.corrected.aggregate.cbu,\ - ecc.errors.corrected.aggregate.sram,ecc.errors.corrected.aggregate.total,ecc.errors.uncorrected.volatile.device_memory,\ - ecc.errors.uncorrected.volatile.dram,ecc.errors.uncorrected.volatile.register_file,ecc.errors.uncorrected.volatile.l1_cache,\ - ecc.errors.uncorrected.volatile.l2_cache,ecc.errors.uncorrected.volatile.texture_memory,ecc.errors.uncorrected.volatile.cbu,\ - ecc.errors.uncorrected.volatile.sram,ecc.errors.uncorrected.volatile.total,ecc.errors.uncorrected.aggregate.device_memory,\ - ecc.errors.uncorrected.aggregate.dram,ecc.errors.uncorrected.aggregate.register_file,ecc.errors.uncorrected.aggregate.l1_cache,\ - ecc.errors.uncorrected.aggregate.l2_cache,ecc.errors.uncorrected.aggregate.texture_memory,ecc.errors.uncorrected.aggregate.cbu,\ - 
ecc.errors.uncorrected.aggregate.sram,ecc.errors.uncorrected.aggregate.total,retired_pages.single_bit_ecc.count,\ - retired_pages.double_bit.count,retired_pages.pending,temperature.gpu,temperature.memory,power.management,power.draw,\ - power.limit,enforced.power.limit,power.default_limit,power.min_limit,power.max_limit,clocks.current.graphics,clocks.current.sm,\ - clocks.current.memory,clocks.current.video,clocks.applications.graphics,clocks.applications.memory,\ - clocks.default_applications.graphics,clocks.default_applications.memory,clocks.max.graphics,clocks.max.sm,clocks.max.memory,\ - mig.mode.current,mig.mode.pending" - apis: - - name: NvidiaGpu - commands: - - run: nvidia-smi --query-gpu=${'{var:metrics}'} --format=csv # update this if you have an alternate path - output: csv - rename_keys: - " ": "" - "\\[MiB\\]": ".MiB" - "\\[%\\]": ".percent" - "\\[W\\]": ".watts" - "\\[MHz\\]": ".MHz" - value_parser: - "clocks|power|fan|memory|temp|util|ecc|stats|gom|mig|count|pcie": '\d*\.?\d+' - '.': '\[N\/A\]|N\/A|Not Active|Disabled|Enabled|Default' +integrations: + - name: nri-flex + # interval: 30s + config: + name: NvidiaSMI + variable_store: + metrics: + "name,driver_version,count,serial,pci.bus_id,pci.domain,pci.bus,\ + pci.device_id,pci.sub_device_id,pcie.link.gen.current,pcie.link.gen.max,\ + pcie.link.width.current,pcie.link.width.max,index,display_mode,display_active,\ + persistence_mode,accounting.mode,accounting.buffer_size,driver_model.current,\ + driver_model.pending,vbios_version,inforom.img,inforom.oem,inforom.ecc,inforom.pwr,\ + gom.current,gom.pending,fan.speed,pstate,clocks_throttle_reasons.supported,\ + clocks_throttle_reasons.gpu_idle,clocks_throttle_reasons.applications_clocks_setting,\ + clocks_throttle_reasons.sw_power_cap,clocks_throttle_reasons.hw_slowdown,clocks_throttle_reasons.hw_thermal_slowdown,\ + clocks_throttle_reasons.hw_power_brake_slowdown,clocks_throttle_reasons.sw_thermal_slowdown,\ + 
clocks_throttle_reasons.sync_boost,memory.total,memory.used,memory.free,compute_mode,\ + utilization.gpu,utilization.memory,encoder.stats.sessionCount,encoder.stats.averageFps,\ + encoder.stats.averageLatency,ecc.mode.current,ecc.mode.pending,ecc.errors.corrected.volatile.device_memory,\ + ecc.errors.corrected.volatile.dram,ecc.errors.corrected.volatile.register_file,ecc.errors.corrected.volatile.l1_cache,\ + ecc.errors.corrected.volatile.l2_cache,ecc.errors.corrected.volatile.texture_memory,ecc.errors.corrected.volatile.cbu,\ + ecc.errors.corrected.volatile.sram,ecc.errors.corrected.volatile.total,ecc.errors.corrected.aggregate.device_memory,\ + ecc.errors.corrected.aggregate.dram,ecc.errors.corrected.aggregate.register_file,ecc.errors.corrected.aggregate.l1_cache,\ + ecc.errors.corrected.aggregate.l2_cache,ecc.errors.corrected.aggregate.texture_memory,ecc.errors.corrected.aggregate.cbu,\ + ecc.errors.corrected.aggregate.sram,ecc.errors.corrected.aggregate.total,ecc.errors.uncorrected.volatile.device_memory,\ + ecc.errors.uncorrected.volatile.dram,ecc.errors.uncorrected.volatile.register_file,ecc.errors.uncorrected.volatile.l1_cache,\ + ecc.errors.uncorrected.volatile.l2_cache,ecc.errors.uncorrected.volatile.texture_memory,ecc.errors.uncorrected.volatile.cbu,\ + ecc.errors.uncorrected.volatile.sram,ecc.errors.uncorrected.volatile.total,ecc.errors.uncorrected.aggregate.device_memory,\ + ecc.errors.uncorrected.aggregate.dram,ecc.errors.uncorrected.aggregate.register_file,ecc.errors.uncorrected.aggregate.l1_cache,\ + ecc.errors.uncorrected.aggregate.l2_cache,ecc.errors.uncorrected.aggregate.texture_memory,ecc.errors.uncorrected.aggregate.cbu,\ + ecc.errors.uncorrected.aggregate.sram,ecc.errors.uncorrected.aggregate.total,retired_pages.single_bit_ecc.count,\ + retired_pages.double_bit.count,retired_pages.pending,temperature.gpu,temperature.memory,power.management,power.draw,\ + 
power.limit,enforced.power.limit,power.default_limit,power.min_limit,power.max_limit,clocks.current.graphics,clocks.current.sm,\ + clocks.current.memory,clocks.current.video,clocks.applications.graphics,clocks.applications.memory,\ + clocks.default_applications.graphics,clocks.default_applications.memory,clocks.max.graphics,clocks.max.sm,clocks.max.memory,\ + mig.mode.current,mig.mode.pending" + apis: + - name: NvidiaGpu + commands: + - run: nvidia-smi --query-gpu=${var:metrics} --format=csv # update this if you have an alternate path + output: csv + rename_keys: + " ": "" + "\\[MiB\\]": ".MiB" + "\\[%\\]": ".percent" + "\\[W\\]": ".watts" + "\\[MHz\\]": ".MHz" + value_parser: + "clocks|power|fan|memory|temp|util|ecc|stats|gom|mig|count|pcie": '\d*\.?\d+' + '.': '\[N\/A\]|N\/A|Not Active|Disabled|Enabled|Default' ``` diff --git a/src/content/docs/infrastructure/host-integrations/host-integrations-list/temporal-monitoring-integration.mdx b/src/content/docs/infrastructure/host-integrations/host-integrations-list/temporal-monitoring-integration.mdx index 1188019e205..fdba4e03e57 100644 --- a/src/content/docs/infrastructure/host-integrations/host-integrations-list/temporal-monitoring-integration.mdx +++ b/src/content/docs/infrastructure/host-integrations/host-integrations-list/temporal-monitoring-integration.mdx @@ -34,6 +34,7 @@ To get the Temporal metrics you need to do some steps as followed: Install the docker and docker-compose on your host. ```yml +sudo apt-get update sudo apt install docker sudo apt install docker-compose ``` @@ -48,6 +49,9 @@ you need to add the prometheus endpoint and port in the `docker-compose.yml` fil ```yml sudo nano docker-compose/docker-compose.yml ``` +- Below the `container_name: temporal` in the Environment section, include the Prometheus endpoint as follows: `- PROMETHEUS_ENDPOINT=0.0.0.0:8000`. +- Similarly, within the same container, beneath the ports section, specify the port as: `- 8000:8000`. 
+- Here’s an example of how to expose a Prometheus endpoint on your local docker-compose Temporal Cluster configuration: ```yml Environment: @@ -64,66 +68,136 @@ You can check the Temporal server running on the below urls: - The Temporal Server will be available on `localhost:7233`. - The Temporal Web UI will be available at `http://:8080` -- The Temporal server metrics will be available on the `http://:8080/metrics` +- The Temporal server metrics will be available on the `http://:8000/metrics` ## Expose Java SDK metrics [#expose-java-sdk-metrics] You can set up the Prometheus registry and Micrometer stats reporter, set the scope, and expose an endpoint from which Prometheus can scrape the SDK Client metrics in the following way. -1. To create `MetricsWorker.java` file in your project main folder like as below. +1. +To set up metrics for the Java SDK temporal, create a `MetricsWorker.java` file in the root directory of the project. ```java -//... -// You need to import the following packages to set up metrics in Java. -// See the Developer's guide for packages required for the other SDKs. +package ; // please add your java application main directory name. 
+ import com.sun.net.httpserver.HttpServer; import com.uber.m3.tally.RootScopeBuilder; import com.uber.m3.tally.Scope; -import com.uber.m3.util.Duration; +import com.uber.m3.tally.StatsReporter; import com.uber.m3.util.ImmutableMap; +import io.micrometer.prometheus.PrometheusConfig; +import io.micrometer.prometheus.PrometheusMeterRegistry; +import io.temporal.client.WorkflowClient; +import io.temporal.client.WorkflowClientOptions; +import io.temporal.common.reporter.MicrometerClientStatsReporter; +import io.temporal.serviceclient.WorkflowServiceStubs; +import io.temporal.serviceclient.WorkflowServiceStubsOptions; +import io.temporal.worker.Worker; +import io.temporal.worker.WorkerFactory; + +public class MetricsWorker { + static final String WORK_FLOW_TASK_QUEUE = "WORK_FLOW_TASK_QUEUE"; //This can be a work flow task name used to differentiate the metrics logs from other work flow + + public static void main(String[] args) { + + PrometheusMeterRegistry registry = new PrometheusMeterRegistry(PrometheusConfig.DEFAULT); + StatsReporter reporter = new MicrometerClientStatsReporter(registry); + + // set up a new scope, report every 10 seconds + Scope scope = new RootScopeBuilder() + .tags(ImmutableMap.of( + "workerCustomTag1", + "workerCustomTag1Value", + "workerCustomTag2", + "workerCustomTag2Value")) + .reporter(reporter) + .reportEvery(com.uber.m3.util.Duration.ofSeconds(10)); + + // For Prometheus collection, expose the scrape endpoint at port 8077. See Micrometer documentation for details on starting the Prometheus scrape endpoint. For example, + HttpServer scrapeEndpoint = MetricsUtils.startPrometheusScrapeEndpoint(registry, 8077); //note: MetricsUtils is a utility file with the scrape endpoint configuration. See Micrometer docs for details on this configuration. + // Stopping the starter stops the HTTP server that exposes the scrape endpoint. 
+ //Runtime.getRuntime().addShutdownHook(new Thread(() -> scrapeEndpoint.stop(1))); + + //Create Workflow service stubs to connect to the Frontend Service. + WorkflowServiceStubs service = WorkflowServiceStubs.newServiceStubs( + WorkflowServiceStubsOptions.newBuilder() + .setMetricsScope(scope) //set the metrics scope for the WorkflowServiceStubs + .build()); + + //Create a Workflow service client, which can be used to start, signal, and query Workflow Executions. + WorkflowClient yourClient = WorkflowClient.newInstance(service, + WorkflowClientOptions.newBuilder().build()); + + + Runtime.getRuntime().addShutdownHook(new Thread(() -> scrapeEndpoint.stop(1))); + // Add metrics scope to workflow service stub options + WorkerFactory factory = WorkerFactory.newInstance(yourClient); + + Worker worker = factory.newWorker(WORK_FLOW_TASK_QUEUE); + worker.registerWorkflowImplementationTypes(SampleWorkflowImpl.class);//Design a workflow incorporating temporal elements and invoking activities within it. Determine where to capture metrics logs and register them with the worker + worker.registerActivitiesImplementations(new SampleActivityImpl()); // Develop an Activity interface utilizing temporal annotations, proceed to its implementation, and establish a connection with the worker by mapping it to registerActivities + + factory.start(); + } +} +``` +2. To create a `MetricsUtils.java` file in the main directory of the project, containing configurations for the scraping endpoint. - // See the Micrometer documentation for configuration details on other supported monitoring systems. - // This example shows how to set up the Prometheus registry and stats reported. 
- - PrometheusMeterRegistry registry = new PrometheusMeterRegistry(PrometheusConfig.DEFAULT); - StatsReporter reporter = new MicrometerClientStatsReporter(registry); - - // set up a new scope, report every 10 seconds - Scope scope = new RootScopeBuilder() - .tags(ImmutableMap.of( - "workerCustomTag1", - "workerCustomTag1Value", - "workerCustomTag2", - "workerCustomTag2Value")) - .reporter(reporter) - .reportEvery(com.uber.m3.util.Duration.ofSeconds(10)); - - // For Prometheus collection, expose the scrape endpoint at port 8077. See Micrometer documentation for details on starting the Prometheus scrape endpoint. For example, - HttpServer scrapeEndpoint = MetricsUtils.startPrometheusScrapeEndpoint(registry, 8077); //note: MetricsUtils is a utility file with the scrape endpoint configuration. See Micrometer docs for details on this configuration. - // Stopping the starter stops the HTTP server that exposes the scrape endpoint. - //Runtime.getRuntime().addShutdownHook(new Thread(() -> scrapeEndpoint.stop(1))); +```java +package ; // please add your java application main directory name. - //Create Workflow service stubs to connect to the Frontend Service. 
- WorkflowServiceStubs service = WorkflowServiceStubs.newServiceStubs( - WorkflowServiceStubsOptions.newBuilder() - .setMetricsScope(scope) //set the metrics scope for the WorkflowServiceStubs - .build()); +import com.sun.net.httpserver.HttpServer; +import io.micrometer.prometheus.PrometheusMeterRegistry; +import static java.nio.charset.StandardCharsets.UTF_8; +import java.io.IOException; +import java.io.OutputStream; +import java.net.InetSocketAddress; + +public class MetricsUtils { + public static HttpServer startPrometheusScrapeEndpoint( + PrometheusMeterRegistry registry, int port) { + try { + HttpServer server = HttpServer.create(new InetSocketAddress(port), 0); + server.createContext( + "/metrics", + httpExchange -> { + String response = registry.scrape(); + httpExchange.sendResponseHeaders(200, response.getBytes(UTF_8).length); + try (OutputStream os = httpExchange.getResponseBody()) { + os.write(response.getBytes(UTF_8)); + } + }); + + server.start(); + return server; + } catch (IOException e) { + throw new RuntimeException(e); + } + } +} +``` +3. Add the dependency in `build.gradle` file under the `dependency section`. - //Create a Workflow service client, which can be used to start, signal, and query Workflow Executions. - WorkflowClient yourClient = WorkflowClient.newInstance(service, - WorkflowClientOptions.newBuilder().build()); +```java +implementation "io.micrometer:micrometer-registry-prometheus" +``` +4. Copy the snippet below and modify the `mainClass` with the directory relevant to your project in the `build.gradle` file task to start the worker. - //... +```java +task startMetricsWorker(type: JavaExec) { + mainClass = '' // Add your MetricsWorker file directory. + classpath = sourceSets.main.runtimeClasspath +} ``` -2. Go to your project directory and build. +5. Go to your project directory and build. ```yml ./gradlew build ``` -3. Start the Worker. +6. Start the worker. 
```yml -./gradlew -q execute -PmainClass= +./gradlew startMetricsWorker ``` -4. See the worker metrics on the exposed Prometheus Scrape Endpoint: `http://:8077/metrics`. +7. See the worker metrics on the exposed Prometheus Scrape Endpoint: `http://:8077/metrics`. &> /tmp/temporal.log & +``` +Afterwards, verify the log file located in the `/tmp/` directory named `temporal.log`. + +### Forwarding Temporal logs to New Relic [#temporal-logs-to-newrelic] + +You can use our [log forwarding](https://docs.newrelic.com/docs/logs/forward-logs/forward-your-logs-using-infrastructure-agent/) to forward Temporal logs to New Relic. +On Linux machines, your log file named logging.yml should be present in this path: +```shell +cd /etc/newrelic-infra/logging.d/ +``` +Once the log file is created, include the subsequent script into the `logging.yml` file: +```yml +logs: + - name: temporal_logs + file: /tmp/temporal.log + attributes: + logtype: temporal_logs +``` +### Restart the Infrastructure agent + +Before you can start reading your data, use the instructions in our [infrastructure agent docs](https://docs.newrelic.com/docs/infrastructure/install-infrastructure-agent/manage-your-agent/start-stop-restart-infrastructure-agent/) to restart your infrastructure agent. 
```bash sudo systemctl restart newrelic-infra.service @@ -196,7 +301,7 @@ Your Temporal dashboard is considered a custom dashboard and can be found in the Here is a NRQL query to check the Temporal request latency sum: ```sql -SELECT sum(temp oral_request_latency_sum) FROM Metric WHERE scrapedTargetURL = 'http://:8000/metrics' +SELECT sum(temporal_request_latency_sum) FROM Metric ``` diff --git a/src/content/docs/infrastructure/infrastructure-monitoring/get-started/get-started-infrastructure-monitoring.mdx b/src/content/docs/infrastructure/infrastructure-monitoring/get-started/get-started-infrastructure-monitoring.mdx index 153e62591ea..16792bdbf2e 100644 --- a/src/content/docs/infrastructure/infrastructure-monitoring/get-started/get-started-infrastructure-monitoring.mdx +++ b/src/content/docs/infrastructure/infrastructure-monitoring/get-started/get-started-infrastructure-monitoring.mdx @@ -41,8 +41,6 @@ redirects: - /docs/infrastructure - /docs/infrastructure/new-relic-infrastructure/installation - /docs/infrastructure/infrastructure-monitoring -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. freshnessValidatedDate: never --- @@ -74,7 +72,7 @@ Read on to learn about our infrastructure monitoring solutions, and to see how t -Want to learn how to use infrastrcuture monitoring effectively? Try out our guided [infrastructure and root causes lab](/docs/infrastructure/infrastructure-monitoring/get-started/identify-root-causes-guide). +Want to learn how to use infrastructure monitoring effectively? Try out our guided [infrastructure and root causes lab](/docs/infrastructure/infrastructure-monitoring/get-started/identify-root-causes-guide). ## Our infrastructure monitoring solutions [#infrastructure-solutions] @@ -182,4 +180,4 @@ To learn more about features in the UI, see [Introduction to the infrastructure Do you use Datadog to monitor your infrastructure but are interested in trying out New Relic's monitoring capabilities for free? 
See our [guide on how to migrate from Datadog](/docs/journey-migration/migrating-from-dd/) to learn how. - \ No newline at end of file + diff --git a/src/content/docs/infrastructure/install-infrastructure-agent/configuration/infrastructure-agent-configuration-settings.mdx b/src/content/docs/infrastructure/install-infrastructure-agent/configuration/infrastructure-agent-configuration-settings.mdx index 6eb1fcb98a1..bd176e62918 100644 --- a/src/content/docs/infrastructure/install-infrastructure-agent/configuration/infrastructure-agent-configuration-settings.mdx +++ b/src/content/docs/infrastructure/install-infrastructure-agent/configuration/infrastructure-agent-configuration-settings.mdx @@ -1555,7 +1555,7 @@ Metrics can also be enriched with extended cloud metadata (including custom reso Linux: `etc/newrelic-infra/integrations.d/` - Windows: `\Program Files\NewRelic\newrelic-infra\inregrations.d` + Windows: `\Program Files\NewRelic\newrelic-infra\integrations.d` @@ -2070,6 +2070,65 @@ Metrics can also be enriched with extended cloud metadata (including custom reso + + + Defines the temp directory used as persisting storage for the integrations SDK synchronization. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ YML option name + + Environment variable + + Type + + Default + + Version +
+ `default_integrations_temp_dir` + + `NRIA_DEFAULT_INTEGRATIONS_TEMP_DIR` + + string + + `/tmp/nr-integrations` + + [1.50.0](/docs/release-notes/infrastructure-release-notes/infrastructure-agent-release-notes/new-relic-infrastructure-agent-1500/) +
+ +
## Inventory variables diff --git a/src/content/docs/infrastructure/install-infrastructure-agent/get-started/install-infrastructure-agent.mdx b/src/content/docs/infrastructure/install-infrastructure-agent/get-started/install-infrastructure-agent.mdx index 53c1136f4b0..ef819567ff1 100644 --- a/src/content/docs/infrastructure/install-infrastructure-agent/get-started/install-infrastructure-agent.mdx +++ b/src/content/docs/infrastructure/install-infrastructure-agent/get-started/install-infrastructure-agent.mdx @@ -12,8 +12,6 @@ redirects: - /docs/infrastructure/install-infrastructure-agent/get-started/install-infrastructure-agent-new-relic - /docs/infrastructure/new-relic-infrastructure/installation - /docs/infrastructure/install-infrastructure-agent -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. freshnessValidatedDate: never --- diff --git a/src/content/docs/infrastructure/install-infrastructure-agent/get-started/requirements-infrastructure-agent.mdx b/src/content/docs/infrastructure/install-infrastructure-agent/get-started/requirements-infrastructure-agent.mdx index 2dc50f1adcf..00dee8ac281 100644 --- a/src/content/docs/infrastructure/install-infrastructure-agent/get-started/requirements-infrastructure-agent.mdx +++ b/src/content/docs/infrastructure/install-infrastructure-agent/get-started/requirements-infrastructure-agent.mdx @@ -152,7 +152,7 @@ The infrastructure agent supports these operating systems up to their manufactur - Versions 12.1, 12.2, 12.3, 12.4, 12.5, 15, 15.1, 15.2, 15.3, 15.4, 15.5 + Versions 12.5, 15.2, 15.3, 15.4, 15.5 diff --git a/src/content/docs/infrastructure/install-infrastructure-agent/linux-installation/install-infrastructure-monitoring-agent-linux.mdx b/src/content/docs/infrastructure/install-infrastructure-agent/linux-installation/install-infrastructure-monitoring-agent-linux.mdx index 1b7e4605bf5..b50d1844488 100644 --- 
a/src/content/docs/infrastructure/install-infrastructure-agent/linux-installation/install-infrastructure-monitoring-agent-linux.mdx +++ b/src/content/docs/infrastructure/install-infrastructure-agent/linux-installation/install-infrastructure-monitoring-agent-linux.mdx @@ -47,8 +47,6 @@ redirects: - /docs/servers/new-relic-servers-linux/installation-configuration/install-redhat-centos-new-relic-servers - /docs/servers/new-relic-servers-linux/installation-configuration/install-ubuntu-debian-new-relic-servers - /docs/infrastructure/install-the-infrastructure-agent/linux-installation -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. freshnessValidatedDate: never --- @@ -327,54 +325,6 @@ To install infrastructure in Linux, follow these instructions: title={<>suse icon SLES} > - **SLES 12.1 (x86)** - - ```bash - sudo curl -o /etc/zypp/repos.d/newrelic-infra.repo https://download.newrelic.com/infrastructure_agent/linux/zypp/sles/12.1/x86_64/newrelic-infra.repo - ``` - - **SLES 12.1 (ARM)** - - ```bash - sudo curl -o /etc/zypp/repos.d/newrelic-infra.repo https://download.newrelic.com/infrastructure_agent/linux/zypp/sles/12.1/aarch64/newrelic-infra.repo - ``` - - **SLES 12.2 (x86)** - - ```bash - sudo curl -o /etc/zypp/repos.d/newrelic-infra.repo https://download.newrelic.com/infrastructure_agent/linux/zypp/sles/12.2/x86_64/newrelic-infra.repo - ``` - - **SLES 12.2 (ARM)** - - ```bash - sudo curl -o /etc/zypp/repos.d/newrelic-infra.repo https://download.newrelic.com/infrastructure_agent/linux/zypp/sles/12.2/aarch64/newrelic-infra.repo - ``` - - **SLES 12.3 (x86)** - - ```bash - sudo curl -o /etc/zypp/repos.d/newrelic-infra.repo https://download.newrelic.com/infrastructure_agent/linux/zypp/sles/12.3/x86_64/newrelic-infra.repo - ``` - - **SLES 12.3 (ARM)** - - ```bash - sudo curl -o /etc/zypp/repos.d/newrelic-infra.repo https://download.newrelic.com/infrastructure_agent/linux/zypp/sles/12.3/aarch64/newrelic-infra.repo - ``` - - **SLES 12.4 
(x86)** - - ```bash - sudo curl -o /etc/zypp/repos.d/newrelic-infra.repo https://download.newrelic.com/infrastructure_agent/linux/zypp/sles/12.4/x86_64/newrelic-infra.repo - ``` - - **SLES 12.4 (ARM)** - - ```bash - sudo curl -o /etc/zypp/repos.d/newrelic-infra.repo https://download.newrelic.com/infrastructure_agent/linux/zypp/sles/12.4/aarch64/newrelic-infra.repo - ``` - **SLES 12.5 (x86)** ```bash @@ -387,18 +337,6 @@ To install infrastructure in Linux, follow these instructions: sudo curl -o /etc/zypp/repos.d/newrelic-infra.repo https://download.newrelic.com/infrastructure_agent/linux/zypp/sles/12.5/aarch64/newrelic-infra.repo ``` - **SLES 15.1 (x86)** - - ```bash - sudo curl -o /etc/zypp/repos.d/newrelic-infra.repo https://download.newrelic.com/infrastructure_agent/linux/zypp/sles/15.1/x86_64/newrelic-infra.repo - ``` - - **SLES 15.1 (ARM)** - - ```bash - sudo curl -o /etc/zypp/repos.d/newrelic-infra.repo https://download.newrelic.com/infrastructure_agent/linux/zypp/sles/15.1/aarch64/newrelic-infra.repo - ``` - **SLES 15.2 (x86)** ```bash @@ -434,6 +372,18 @@ To install infrastructure in Linux, follow these instructions: ```bash sudo curl -o /etc/zypp/repos.d/newrelic-infra.repo https://download.newrelic.com/infrastructure_agent/linux/zypp/sles/15.4/aarch64/newrelic-infra.repo ``` + + **SLES 15.5 (x86)** + + ```bash + sudo curl -o /etc/zypp/repos.d/newrelic-infra.repo https://download.newrelic.com/infrastructure_agent/linux/zypp/sles/15.5/x86_64/newrelic-infra.repo + ``` + + **SLES 15.5 (ARM)** + + ```bash + sudo curl -o /etc/zypp/repos.d/newrelic-infra.repo https://download.newrelic.com/infrastructure_agent/linux/zypp/sles/15.5/aarch64/newrelic-infra.repo + ``` 5. 
Refresh the repositories: diff --git a/src/content/docs/infrastructure/install-infrastructure-agent/manage-your-agent/start-stop-restart-infrastructure-agent.mdx b/src/content/docs/infrastructure/install-infrastructure-agent/manage-your-agent/start-stop-restart-infrastructure-agent.mdx index 4c04d300015..f9a92e514f0 100644 --- a/src/content/docs/infrastructure/install-infrastructure-agent/manage-your-agent/start-stop-restart-infrastructure-agent.mdx +++ b/src/content/docs/infrastructure/install-infrastructure-agent/manage-your-agent/start-stop-restart-infrastructure-agent.mdx @@ -16,8 +16,6 @@ redirects: - /docs/servers/new-relic-servers-linux/maintenance - /docs/servers/new-relic-servers-windows/maintenance - /docs/infrastructure/new-relic-infrastructure/configuration/start-stop-restart-check-infrastructure-agent-status -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. freshnessValidatedDate: never --- diff --git a/src/content/docs/infrastructure/prometheus-integrations/install-configure-prometheus-agent/migration-guide.mdx b/src/content/docs/infrastructure/prometheus-integrations/install-configure-prometheus-agent/migration-guide.mdx index 4d24dee760c..b35a8f195da 100644 --- a/src/content/docs/infrastructure/prometheus-integrations/install-configure-prometheus-agent/migration-guide.mdx +++ b/src/content/docs/infrastructure/prometheus-integrations/install-configure-prometheus-agent/migration-guide.mdx @@ -186,8 +186,6 @@ We also give you a curated dashboard for self-metrics with performance and healt ## Keep Prometheus OpenMetrics integration during the migration [#keep-openmetrics] -Although our Prometheus OpenMetrics integration in Kubernetes has been replaced by the NR Prometheus agent, it will be supported for a period of time (~~June 2023~~January 2024). - New Relic wants to ensure a smooth and seamless migration to our customers. 
You can keep either the Prometheus OpenMetrics integration, `nri-prometheus`, the Prometheus agent, `newrelic-prometheus-agent`, or both. To preserve nri-prometheus and not using Prometheus agent as of now, set your [`values.yaml`](https://github.com/newrelic/helm-charts/blob/master/charts/nri-bundle/values.yaml) file as follows: diff --git a/src/content/docs/licenses/overview.mdx b/src/content/docs/licenses/overview.mdx index 344314e063a..f0a62f31383 100644 --- a/src/content/docs/licenses/overview.mdx +++ b/src/content/docs/licenses/overview.mdx @@ -1,6 +1,5 @@ --- title: Licenses -type: landingPage tags: - Licenses metaDescription: Licenses landing page diff --git a/src/content/docs/logs/forward-logs/enable-log-management-new-relic.mdx b/src/content/docs/logs/forward-logs/enable-log-management-new-relic.mdx index d57c258da63..1242138ad66 100644 --- a/src/content/docs/logs/forward-logs/enable-log-management-new-relic.mdx +++ b/src/content/docs/logs/forward-logs/enable-log-management-new-relic.mdx @@ -19,8 +19,6 @@ redirects: - /docs/logs/new-relic-logs - /docs/logs/forward-logs - /docs/logs/enable-log-management-in-new-relic/enable-log-monitoring-in-new-relicYES -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. 
freshnessValidatedDate: never --- diff --git a/src/content/docs/logs/forward-logs/fluent-bit-plugin-log-forwarding.mdx b/src/content/docs/logs/forward-logs/fluent-bit-plugin-log-forwarding.mdx index 5f2e0481f72..e8536d2c58d 100644 --- a/src/content/docs/logs/forward-logs/fluent-bit-plugin-log-forwarding.mdx +++ b/src/content/docs/logs/forward-logs/fluent-bit-plugin-log-forwarding.mdx @@ -84,22 +84,27 @@ Fluent Bit needs to know the location of the New Relic plugin and the New Relic Name tail Tag my.tag Path /PATH/TO/YOUR/LOG/FILE + # If you have multiple sources, just add another [INPUT] section like this: + # [INPUT] + # Name tail + # Tag my.other.tag + # Path /PATH/TO/SOME/OTHER/LOG/FILE - # having multiple [FILTER] blocks allows one to control the flow of changes as they read top down. + # Having multiple [FILTER] blocks allows you to control the flow of changes as they read top down. [FILTER] Name modify - # here we only match on one tag, my.tag, defined in the [INPUT] section earlier + # Here we only match on one tag, my.tag, defined in the [INPUT] section earlier Match my.tag - # below, we're renaming the host.cpu attribute to CPU + # Below, we're renaming the host.cpu attribute to CPU Rename host.cpu CPU [FILTER] Name record_modifier - # match on all tags, *, so all logs get decorated per the Record clauses below. Record adds attributes + their values to each record. + # Match on all tags, *, so all logs get decorated per the Record clauses below. Record adds attributes + their values to each record. 
Match * - # adding a logtype attribute ensures your logs will be automatically parsed by our built-in parsing rules + # Adding a logtype attribute ensures your logs will be automatically parsed by our built-in parsing rules Record logtype nginx - # add the server's hostname to all logs generated + # Add the server's hostname to all logs generated Record hostname ${HOSTNAME} [OUTPUT] diff --git a/src/content/docs/logs/forward-logs/forward-your-logs-using-infrastructure-agent.mdx b/src/content/docs/logs/forward-logs/forward-your-logs-using-infrastructure-agent.mdx index 4e28983f90d..405c25dbe91 100644 --- a/src/content/docs/logs/forward-logs/forward-your-logs-using-infrastructure-agent.mdx +++ b/src/content/docs/logs/forward-logs/forward-your-logs-using-infrastructure-agent.mdx @@ -12,8 +12,6 @@ redirects: - /docs/logs/enable-new-relic-logs/1-enable-logs/forward-your-logs-using-new-relic-infrastructure - /docs/logs/enable-log-monitoring-new-relic/enable-log-monitoring-new-relic/forward-your-logs-using-infrastructure-agent - /docs/logs/enable-log-management-new-relic/enable-log-monitoring-new-relic/forward-your-logs-using-infrastructure-agent -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. 
freshnessValidatedDate: never --- @@ -1119,20 +1117,20 @@ The following instructions summarize how to increase these limits if you want to apt-cache showpkg fluent-bit ``` - To upgrade to a particular Fluent Bit version, just run (in the following examples, version `2.0.8` is used): + To upgrade to a particular Fluent Bit version, just run (in the following examples, version `2.2.2` is used): RPM: ```bash # Remove command only required when downgrading to a previous version sudo yum remove fluent-bit - sudo yum install fluent-bit-2.0.8-1 + sudo yum install fluent-bit-2.2.2-1 ``` DEB: ```bash - sudo apt install fluent-bit=2.0.8 + sudo apt install fluent-bit=2.2.2 ``` diff --git a/src/content/docs/logs/get-started/get-started-log-management.mdx b/src/content/docs/logs/get-started/get-started-log-management.mdx index a5ff80f77d9..6b19bd58c03 100644 --- a/src/content/docs/logs/get-started/get-started-log-management.mdx +++ b/src/content/docs/logs/get-started/get-started-log-management.mdx @@ -20,8 +20,6 @@ redirects: - /docs/logs/log-management/get-started - /docs/logs/log-management/get-started/get-started-log-management - /docs/logs -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. 
freshnessValidatedDate: never --- diff --git a/src/content/docs/logs/get-started/live-archives.mdx b/src/content/docs/logs/get-started/live-archives.mdx index 88552d55249..5cea9558d2a 100644 --- a/src/content/docs/logs/get-started/live-archives.mdx +++ b/src/content/docs/logs/get-started/live-archives.mdx @@ -122,7 +122,7 @@ You can also edit an existing partition clicking the ### See your data consumption [#data-consumption] diff --git a/src/content/docs/logs/log-api/introduction-log-api.mdx b/src/content/docs/logs/log-api/introduction-log-api.mdx index 3f997f1d153..52c601a24a9 100644 --- a/src/content/docs/logs/log-api/introduction-log-api.mdx +++ b/src/content/docs/logs/log-api/introduction-log-api.mdx @@ -16,8 +16,6 @@ redirects: - /docs/telemetry-data-platform/get-data/apis/introduction-log-api - /docs/logs/log-management/log-api - /docs/logs/log-management/log-api/introduction-log-api -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. freshnessValidatedDate: never --- diff --git a/src/content/docs/logs/ui-data/timestamp-support.mdx b/src/content/docs/logs/ui-data/timestamp-support.mdx index cc21ecc6216..69a0c359318 100644 --- a/src/content/docs/logs/ui-data/timestamp-support.mdx +++ b/src/content/docs/logs/ui-data/timestamp-support.mdx @@ -206,7 +206,7 @@ You can use the `datetime` Grok type to parse timestamps in non-supported format MM/dd/yyyy HH:mm:ss.SSS - `%{GREEDYDATA:timestamp:datetime;MM/dd/yyyy HH:mm:ss.SSS}` + `%{DATA:timestamp:datetime;MM/dd/yyyy HH:mm:ss.SSS}` @@ -217,7 +217,7 @@ You can use the `datetime` Grok type to parse timestamps in non-supported format MM-dd-yyyy HH:mm:ss - `%{GREEDYDATA:timestamp:datetime;MM-dd-yyyy HH:mm:ss}` + `%{DATA:timestamp:datetime;MM-dd-yyyy HH:mm:ss}` @@ -225,10 +225,10 @@ You can use the `datetime` Grok type to parse timestamps in non-supported format 11/17/2023 09:55:25 AM - MM/dd/yyyy hh:mm:ss a + MM/dd/yyyy h:mm:ss a - `%{GREEDYDATA:timestamp:datetime;h:mm:ss a}` + 
`%{DATA:timestamp:datetime;MM/dd/yyyy h:mm:ss a}` diff --git a/src/content/docs/logs/ui-data/use-logs-ui.mdx b/src/content/docs/logs/ui-data/use-logs-ui.mdx index b229e8bfb61..d6313f14524 100644 --- a/src/content/docs/logs/ui-data/use-logs-ui.mdx +++ b/src/content/docs/logs/ui-data/use-logs-ui.mdx @@ -404,6 +404,16 @@ Depending on your New Relic subscription, you can access your logs from several Go to **[one.newrelic.com > All capabilities](https://one.newrelic.com/all-capabilities) > All entities > (select an entity) > Logs**. + + + + From your IDE + + + + [Install New Relic's CodeStream extension](/docs/codestream/start-here/install-codestream) to view logs in your IDE. + + diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile-android/install-configure/android-agent-crash-reporting.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile-android/install-configure/android-agent-crash-reporting.mdx index d9c72099cad..e45f435a725 100644 --- a/src/content/docs/mobile-monitoring/new-relic-mobile-android/install-configure/android-agent-crash-reporting.mdx +++ b/src/content/docs/mobile-monitoring/new-relic-mobile-android/install-configure/android-agent-crash-reporting.mdx @@ -61,6 +61,16 @@ To enable compressed map uploads, add the following to your app's `newrelic.prop com.newrelic.compressed_uploads=true ``` +#### Configure mapping upload host + +Add the following to your app's `newrelic.properties` file to send maps on a redirected server: + +```properties +# Please only provide the host name as the value without https:// at the beginning or / at the end, for example: xyz.api.com +com.newrelic.mapping_upload_host=xyz.api.com +``` + + ### Deferred crash reporting diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile-cordova-phonegap/get-started-with-cordova-monitoring.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile-cordova-phonegap/get-started-with-cordova-monitoring.mdx index 04858028a4e..dfa1f99ffcd 100644 --- 
a/src/content/docs/mobile-monitoring/new-relic-mobile-cordova-phonegap/get-started-with-cordova-monitoring.mdx +++ b/src/content/docs/mobile-monitoring/new-relic-mobile-cordova-phonegap/get-started-with-cordova-monitoring.mdx @@ -86,8 +86,8 @@ Add the following optional configurations to the `--variable` argument, which wi * `CRASH_COLLECTOR_ADDRESS`: Specifies the authority component of the crash data upload URI. * `FEDRAMP_ENABLED`: Enable or disable reporting data using different endpoints for US government clients. * Possible values are `true` and `false`. Defaults to `false`. -* `OFFLINE_STORAGE_ENABLED`: Enable or disable offline data storage when no internet connection is available. . - * Possible values are true and false. Defaults to true. +* `OFFLINE_STORAGE_ENABLED`: Enable or disable offline data storage when no internet connection is available. + * Possible values are `true` and `false`. Defaults to `true`. These options are only available on Cordova agent v6.2.1 and higher. diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile-flutter/monitor-your-flutter-application.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile-flutter/monitor-your-flutter-application.mdx index 133f67a4c23..c23bc9b61f1 100644 --- a/src/content/docs/mobile-monitoring/new-relic-mobile-flutter/monitor-your-flutter-application.mdx +++ b/src/content/docs/mobile-monitoring/new-relic-mobile-flutter/monitor-your-flutter-application.mdx @@ -109,7 +109,7 @@ import 'package:newrelic_mobile/newrelic_mobile.dart'; // Optional: Enable or disable reporting data using different endpoints for US government clients fedRampEnabled: false, // Optional: Enable or disable offline data storage when no internet connection is available. 
- offlineStorageEnabled:true + offlineStorageEnabled: true ); NewrelicMobile.instance.start(config, () { runApp(MyApp()); diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile-ionic-capacitor/get-started/introduction-new-relic-ionic-capacitor.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile-ionic-capacitor/get-started/introduction-new-relic-ionic-capacitor.mdx index 0cd1d113702..37c75e35a65 100644 --- a/src/content/docs/mobile-monitoring/new-relic-mobile-ionic-capacitor/get-started/introduction-new-relic-ionic-capacitor.mdx +++ b/src/content/docs/mobile-monitoring/new-relic-mobile-ionic-capacitor/get-started/introduction-new-relic-ionic-capacitor.mdx @@ -111,7 +111,7 @@ let agentConfig : AgentConfiguration = { // Optional:Enable or disable reporting data using different endpoints for US government clients //fedRampEnabled: false // Optional: Enable or disable offline data storage when no internet connection is available. - offlineStorageEnabled:true + offlineStorageEnabled: true } NewRelicCapacitorPlugin.start({appKey:appToken, agentConfiguration:agentConfig}) ``` @@ -142,7 +142,6 @@ Make sure you paste your application token(s) into `appToken = ""` in the code a ```groovy apply plugin: "com.android.application" apply plugin: 'newrelic' // <-- add this - ``` 4. 
Make sure your app requests `INTERNET` and `ACCESS_NETWORK_STATE` permissions by adding these lines to your `AndroidManifest.xml`: ``` diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/configure-settings.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/configure-settings.mdx index ade01cd2138..5489cd4b4d9 100644 --- a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/configure-settings.mdx +++ b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/configure-settings.mdx @@ -18,9 +18,11 @@ redirects: - /docs/mobile-monitoring/new-relic-mobile-ios/ios-sdk-api/disable-features - /docs/mobile-monitoring/new-relic-mobile-ios/ios-sdk-api/enable-features - /docs/mobile-monitoring/new-relic-mobile/mobile-sdk/enable-disable-features -freshnessValidatedDate: never +freshnessValidatedDate: 2024-03-14 --- +import mobileUnityEditorUI from 'images/mobile_screenshot-crop_Unity-editor-UI.webp' + @@ -44,6 +46,9 @@ freshnessValidatedDate: never React Native + + Unity + Xamarin @@ -51,8 +56,6 @@ freshnessValidatedDate: never -## Description [#description] - Use the methods below to change default mobile monitoring settings. All settings, including the call to invoke the agent, are called in the `onCreate` method of the `MainActivity` class. To change settings, you have two options (if the setting supports it): @@ -1029,7 +1032,7 @@ let agentConfig : AgentConfiguration = { crashCollectorAddress: "", sendConsoleEvents: false, fedRampEnabled: false, - offlineStorageEnabled:false + offlineStorageEnabled: false } NewRelicCapacitorPlugin.start({appKey:appToken, agentConfiguration:agentConfig}) @@ -1196,14 +1199,14 @@ NewRelicCapacitorPlugin.start({appKey:appToken, agentConfiguration:agentConfig}) - Enable or disable offline data storage when no internet connection is available. + Enable or disable offline data storage when no internet connection is available. 
To configure the amount of offline storage, see [Set max offline storage size](/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-max-offline-storage/#capacitor). - Offline storage is Enabled by default. To disable it, add the following feature flag: + Offline storage is enabled by default. To disable it, add the following feature flag: ```typescript - offlineStorageEnabled:false + offlineStorageEnabled: false ``` @@ -1377,10 +1380,10 @@ cordova plugin add https://github.com/newrelic/newrelic-cordova-plugin.git - Enable or disable offline data storage when no internet connection is available. + Enable or disable offline data storage when no internet connection is available. To configure the amount of offline storage, see [Set max offline storage size](/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-max-offline-storage/#cordova). - Offline storage is Enabled by default. To disable it, add the following feature flag: + Offline storage is enabled by default. To disable it, add the following feature flag: ```typescript FEDRAMP_ENABLED = "false" @@ -1412,8 +1415,8 @@ Here's a sample configuration: CrossNewRelic.Current.TrackShellNavigatedEvents(); // Set optional agent configuration - // Options are: crashReportingEnabled, loggingEnabled, logLevel, collectorAddress, crashCollectorAddress,analyticsEventEnabled, networkErrorRequestEnabled, networkRequestEnabled, interactionTracingEnabled,webViewInstrumentation, fedRampEnabled,offlineStorageEnabled - AgentStartConfiguration agentConfig = new AgentStartConfiguration(crashReportingEnabled:false,offlineStorageEnabled:false); + // Options are: crashReportingEnabled, loggingEnabled, logLevel, collectorAddress, crashCollectorAddress,analyticsEventEnabled, networkErrorRequestEnabled, networkRequestEnabled, interactionTracingEnabled,webViewInstrumentation, fedRampEnabled, offlineStorageEnabled + AgentStartConfiguration agentConfig = new AgentStartConfiguration(crashReportingEnabled: false, offlineStorageEnabled: 
false); if (DeviceInfo.Current.Platform == DevicePlatform.Android) { @@ -1565,13 +1568,13 @@ Here's a sample configuration: - Enable or disable offline data storage when no internet connection is available. + Enable or disable offline data storage when no internet connection is available. To configure the amount of offline storage, see [Set max offline storage size](/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-max-offline-storage/#maui). - Offline storage is Enabled by default. To disable it, add the following feature flag: + Offline storage is enabled by default. To disable it, add the following feature flag: ```csharp - offlineStorageEnabled:false + offlineStorageEnabled: false ``` @@ -1604,8 +1607,8 @@ Config config = Config( loggingEnabled: false, webViewInstrumentation: false, printStatementAsEventsEnabled: false, - httpInstrumentationEnabled:false, - offlineStorageEnabled:true); + httpInstrumentationEnabled: false, + offlineStorageEnabled: true); // NewrelicMobile.instance.start(config, () { // runApp(MyApp()); @@ -1770,13 +1773,13 @@ runZonedGuarded(() async { - Enable or disable offline data storage when no internet connection is available. + Enable or disable offline data storage when no internet connection is available. To configure the amount of offline storage, see [Set max offline storage size](/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-max-offline-storage/#flutter). - Offline storage is Enabled by default. To disable it, add the following feature flag: + Offline storage is enabled by default. To disable it, add the following feature flag: ```typescript - offlineStorageEnabled:false + offlineStorageEnabled: false ``` @@ -1990,19 +1993,40 @@ AppRegistry.registerComponent(appName, () => App); - Enable or disable offline data storage when no internet connection is available. + Enable or disable offline data storage when no internet connection is available. 
To configure the amount of offline storage, see [Set max offline storage size](/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-max-offline-storage/#react). - Offline storage is Enabled by default. To disable it, add the following feature flag: + Offline storage is enabled by default. To disable it, add the following feature flag: ```typescript - offlineStorageEnabled:false + offlineStorageEnabled: false ``` + + +New Relic offers default settings for mobile app monitoring in Unity. You can easily adjust these settings within the Unity editor to fit your specific needs. + +To configure these settings: + +1. Launch the Unity Editor and open your Unity project. + +2. From the menu bar, select **Tools > New Relic > Getting Started > New Relic Configuration**. + +3. The left-hand **Inspector** window displays a list of default settings. Simply check the box next to a setting to enable it, or uncheck the box to disable it. + + Screenshot of the Unity editor to configure settings + +4. Click **Add component**. + + The Xamarin agent SDK allows you to configure default settings to change the behavior of the agent. 
@@ -2024,8 +2048,8 @@ public App () CrossNewRelicClient.Current.TrackShellNavigatedEvents(); // Set optional agent configuration - // Options are: crashReportingEnabled, loggingEnabled, logLevel, collectorAddress, crashCollectorAddress,analyticsEventEnabled, networkErrorRequestEnabled, networkRequestEnabled, interactionTracingEnabled,webViewInstrumentation, fedRampEnabled,offlineStorageEnabled - AgentStartConfiguration agentConfig = new AgentStartConfiguration(crashReportingEnabled:false,offlineStorageEnabled:false); + // Options are: crashReportingEnabled, loggingEnabled, logLevel, collectorAddress, crashCollectorAddress,analyticsEventEnabled, networkErrorRequestEnabled, networkRequestEnabled, interactionTracingEnabled,webViewInstrumentation, fedRampEnabled, offlineStorageEnabled + AgentStartConfiguration agentConfig = new AgentStartConfiguration(crashReportingEnabled: false, offlineStorageEnabled: false); if (Device.RuntimePlatform == Device.Android) { @@ -2173,13 +2197,13 @@ public App () - Enable or disable offline data storage when no internet connection is available. + Enable or disable offline data storage when no internet connection is available. To configure the amount of offline storage, see [Set max offline storage size](/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-max-offline-storage/#xamarin). - Offline storage is Enabled by default. To disable it, add the following feature flag: + Offline storage is enabled by default. 
To disable it, add the following feature flag: ```typescript - offlineStorageEnabled:false + offlineStorageEnabled: false ``` diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/create-attribute.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/create-attribute.mdx index 64e297eea99..b1d2e650ce9 100644 --- a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/create-attribute.mdx +++ b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/create-attribute.mdx @@ -39,12 +39,12 @@ freshnessValidatedDate: 2023-07-21 React Native - - Xamarin - Unity + + Xamarin + @@ -639,16 +639,16 @@ Creates a session-level attribute shared by multiple mobile event types. Overwri ``` - + ## Syntax [#syntax] ```csharp -SetAttribute(string name, string value) : bool; +setAttribute(string name, string value) : bool; -SetAttribute(string name, double value) : bool; +setAttribute(string name, double value) : bool; -SetAttribute(string name, bool value) : bool; +setAttribute(string name, bool value) : bool; ``` ## Description [#description] @@ -707,23 +707,22 @@ Creates a session-level attribute shared by multiple mobile event types. 
Overwri ## Example [#example] ``` csharp - CrossNewRelicClient.Current.SetAttribute("XamarinBoolAttr", false); - CrossNewRelicClient.Current.SetAttribute("XamarinStrAttr", "Cat"); - CrossNewRelicClient.Current.SetAttribute("XamarinNumAttr", 13.5); + NewRelicAgent.SetAttribute("UnityBoolCustomAttr", false); + NewRelicAgent.SetAttribute("UnityStringCustomAttr", "Cat"); + NewRelicAgent.SetAttribute('UnityCustomAttrNumber', 37); ``` - - - + + ## Syntax [#syntax] ```csharp -setAttribute(string name, string value) : bool; +SetAttribute(string name, string value) : bool; -setAttribute(string name, double value) : bool; +SetAttribute(string name, double value) : bool; -setAttribute(string name, bool value) : bool; +SetAttribute(string name, bool value) : bool; ``` ## Description [#description] @@ -782,12 +781,12 @@ Creates a session-level attribute shared by multiple mobile event types. Overwri ## Example [#example] ``` csharp - NewRelicAgent.SetAttribute("UnityBoolCustomAttr", false); - NewRelicAgent.SetAttribute("UnityStringCustomAttr", "Cat"); - NewRelicAgent.SetAttribute('UnityCustomAttrNumber', 37); + CrossNewRelicClient.Current.SetAttribute("XamarinBoolAttr", false); + CrossNewRelicClient.Current.SetAttribute("XamarinStrAttr", "Cat"); + CrossNewRelicClient.Current.SetAttribute("XamarinNumAttr", 13.5); ``` - + diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/current-session-id.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/current-session-id.mdx index 00d8d8599ad..439de80f467 100644 --- a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/current-session-id.mdx +++ b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/current-session-id.mdx @@ -39,12 +39,12 @@ freshnessValidatedDate: 2023-07-20 React Native - - Xamarin - Unity + + Xamarin + @@ -243,12 +243,12 @@ Returns ID string for the current session. 
``` - + ## Syntax [#syntax] ```csharp -CurrentSessionId() : string; +currentSessionId() : string; ``` ## Description [#description] @@ -262,17 +262,16 @@ Returns ID string for the current session. ## Example [#example] ``` csharp -string sessionId = CrossNewRelic.Current.CurrentSessionId(); + string sessionId = NewRelicAgent.CurrentSessionId(); ``` - - - + + ## Syntax [#syntax] ```csharp -currentSessionId() : string; +CurrentSessionId() : string; ``` ## Description [#description] @@ -286,9 +285,9 @@ Returns ID string for the current session. ## Example [#example] ``` csharp - string sessionId = NewRelicAgent.CurrentSessionId(); +string sessionId = CrossNewRelic.Current.CurrentSessionId(); ``` - + diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/increment-session-attribute-count.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/increment-session-attribute-count.mdx index 4c4cdb80e6d..ab405fa1534 100644 --- a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/increment-session-attribute-count.mdx +++ b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/increment-session-attribute-count.mdx @@ -38,12 +38,13 @@ freshnessValidatedDate: 2023-07-20 React Native + + Unity + Xamarin - - Unity - + @@ -629,12 +630,12 @@ Returns `true` if the event is recorded successfully, or `false` if not. ``` - + ## Syntax [#syntax] ```csharp -IncrementAttribute(string name, float value = 1) : bool; +incrementAttribute(string name, float value = 1) : bool; ``` ## Description [#description] @@ -699,19 +700,18 @@ Returns `true` if the event is recorded successfully, or `false` if not. 
``` csharp // Increment by 1 - CrossNewRelicClient.Current.IncrementAttribute("XamarinNumAttr"); + NewRelicAgent.IncrementAttribute('UnityCustomAttrNumber'); // Increment by value - CrossNewRelicClient.Current.IncrementAttribute("XamarinNumAttr", 12.3); + NewRelicAgent.IncrementAttribute('UnityCustomAttrNumber', 5); ``` - - - + + ## Syntax [#syntax] ```csharp -incrementAttribute(string name, float value = 1) : bool; +IncrementAttribute(string name, float value = 1) : bool; ``` ## Description [#description] @@ -776,11 +776,12 @@ Returns `true` if the event is recorded successfully, or `false` if not. ``` csharp // Increment by 1 - NewRelicAgent.IncrementAttribute('UnityCustomAttrNumber'); + CrossNewRelicClient.Current.IncrementAttribute("XamarinNumAttr"); // Increment by value - NewRelicAgent.IncrementAttribute('UnityCustomAttrNumber', 5); + CrossNewRelicClient.Current.IncrementAttribute("XamarinNumAttr", 12.3); ``` - + + \ No newline at end of file diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/network-request-success.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/network-request-success.mdx index 21f72041e84..27ee1ee8c5c 100644 --- a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/network-request-success.mdx +++ b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/network-request-success.mdx @@ -36,12 +36,12 @@ freshnessValidatedDate: 2023-07-19 React Native - - Xamarin - Unity + + Xamarin + @@ -1286,12 +1286,12 @@ Tracks network requests manually. 
You can use this method to record HTTP transac ``` - + ## Syntax [#syntax] ```csharp -NoticeHttpTransaction(string url, string httpMethod, int statusCode, long startTime,long endTime, long bytesSent, long bytesReceived, string responseBody): void; +noticeHttpTransaction(string url, string httpMethod, int statusCode, long startTime,long endTime, long bytesSent, long bytesReceived, string responseBody): void; ``` ## Description [#description] @@ -1435,26 +1435,21 @@ Tracks network requests manually. You can use this method to record HTTP transac ## Example [#example] ``` csharp - CrossNewRelicClient.Current.NoticeHttpTransaction( - "https://newrelic.com", - "GET", - 200, - DateTimeOffset.Now.ToUnixTimeMilliseconds(), - DateTimeOffset.Now.ToUnixTimeMilliseconds() + 100, - 0, - 1000, - "" - ); + NewRelicAgent.NoticeHttpTransaction('https://github.com', + 'GET', 200, + DateTimeOffset.Now.ToUnixTimeMilliseconds(), + DateTimeOffset.Now.ToUnixTimeMilliseconds()+1000, + 100, 101, "response body", + null); ``` - - + ## Syntax [#syntax] ```csharp -noticeHttpTransaction(string url, string httpMethod, int statusCode, long startTime,long endTime, long bytesSent, long bytesReceived, string responseBody): void; +NoticeHttpTransaction(string url, string httpMethod, int statusCode, long startTime,long endTime, long bytesSent, long bytesReceived, string responseBody): void; ``` ## Description [#description] @@ -1598,16 +1593,21 @@ Tracks network requests manually. 
You can use this method to record HTTP transac ## Example [#example] ``` csharp - NewRelicAgent.NoticeHttpTransaction('https://github.com', - 'GET', 200, - DateTimeOffset.Now.ToUnixTimeMilliseconds(), - DateTimeOffset.Now.ToUnixTimeMilliseconds()+1000, - 100, 101, "response body", - null); + CrossNewRelicClient.Current.NoticeHttpTransaction( + "https://newrelic.com", + "GET", + 200, + DateTimeOffset.Now.ToUnixTimeMilliseconds(), + DateTimeOffset.Now.ToUnixTimeMilliseconds() + 100, + 0, + 1000, + "" + ); ``` + diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/record-breadcrumb.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/record-breadcrumb.mdx index b8d19df0331..33fd063e8f8 100644 --- a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/record-breadcrumb.mdx +++ b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/record-breadcrumb.mdx @@ -37,13 +37,13 @@ freshnessValidatedDate: 2023-07-18 React Native - - - Xamarin Unity + + Xamarin + @@ -636,7 +636,7 @@ Returns `true` if the event is recorded successfully, or `false` if not. ``` - + ## Syntax [#syntax] @@ -648,7 +648,7 @@ RecordBreadcrumb(string name, Dictionary attributes): bool; This call creates and records a `MobileBreadcrumb` event, which can be queried with NRQL and in the [crash event trail](/docs/mobile-monitoring/mobile-monitoring-ui/crashes/mobile-crash-event-trail). Mobile breadcrumbs are useful for crash analysis; they should be created for app activity that may be helpful for troubleshooting crashes. -In addition to whatever custom attributes you choose, the event will also have associated [session attributes](/docs/insights/explore-data/attributes/mobile-default-attributes-insights#mobile-list). Unlike with using [`setAttribute`](/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/create-attribute/), adding attributes to a breadcrumb event adds them only to that event; they are not session attributes. 
+In addition to whatever custom attributes you choose, the event will also have associated [session attributes](/docs/insights/explore-data/attributes/mobile-default-attributes-insights#mobile-list). Unlike with using [`setAttribute`](docs/mobile-monitoring/new-relic-mobile/mobile-sdk/create-attribute/), adding attributes to a breadcrumb event adds them only to that event; they are not session attributes. ## Parameters [#parameters] @@ -705,18 +705,14 @@ Returns `true` if the event is recorded successfully, or `false` if not. ## Example [#example] ``` csharp - CrossNewRelicClient.Current.RecordBreadcrumb("XamarinExampleBreadcrumb", new Dictionary() - { - {"BreadNumValue", 12.3 }, - {"BreadStrValue", "XamBread" }, - {"BreadBoolValue", true } - } - ); -``` + Dictionary dic = new Dictionary(); + dic.Add("Unity Attribute", "Data1"); - + NewRelicAgent.RecordBreadCrumb("Unity BreadCrumb Example", dic); +``` - + + ## Syntax [#syntax] @@ -728,7 +724,7 @@ RecordBreadcrumb(string name, Dictionary attributes): bool; This call creates and records a `MobileBreadcrumb` event, which can be queried with NRQL and in the [crash event trail](/docs/mobile-monitoring/mobile-monitoring-ui/crashes/mobile-crash-event-trail). Mobile breadcrumbs are useful for crash analysis; they should be created for app activity that may be helpful for troubleshooting crashes. -In addition to whatever custom attributes you choose, the event will also have associated [session attributes](/docs/insights/explore-data/attributes/mobile-default-attributes-insights#mobile-list). Unlike with using [`setAttribute`](docs/mobile-monitoring/new-relic-mobile/mobile-sdk/create-attribute/), adding attributes to a breadcrumb event adds them only to that event; they are not session attributes. +In addition to whatever custom attributes you choose, the event will also have associated [session attributes](/docs/insights/explore-data/attributes/mobile-default-attributes-insights#mobile-list). 
Unlike with using [`setAttribute`](/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/create-attribute/), adding attributes to a breadcrumb event adds them only to that event; they are not session attributes. ## Parameters [#parameters] @@ -785,12 +781,15 @@ Returns `true` if the event is recorded successfully, or `false` if not. ## Example [#example] ``` csharp - Dictionary dic = new Dictionary(); - dic.Add("Unity Attribute", "Data1"); - - NewRelicAgent.RecordBreadCrumb("Unity BreadCrumb Example", dic); + CrossNewRelicClient.Current.RecordBreadcrumb("XamarinExampleBreadcrumb", new Dictionary() + { + {"BreadNumValue", 12.3 }, + {"BreadStrValue", "XamBread" }, + {"BreadBoolValue", true } + } + ); ``` - + diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/record-custom-events.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/record-custom-events.mdx index 701c60b7396..834b1e2d4c1 100644 --- a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/record-custom-events.mdx +++ b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/record-custom-events.mdx @@ -39,13 +39,13 @@ freshnessValidatedDate: 2023-07-18 React Native - - - Xamarin Unity - + + + Xamarin + @@ -793,12 +793,12 @@ Returns `true` if the event is recorded successfully, or `false` if not. ``` - + ## Syntax [#syntax] ```csharp -RecordCustomEvent(string eventType, string eventName, Dictionary attributes): bool; +RecordCustomEvent(string name, Dictionary attributes): bool; ``` ## Description [#description] @@ -827,7 +827,7 @@ Creates and records a [custom event](/docs/insights/new-relic-insights/adding-qu - `eventType` + `name` @@ -835,13 +835,75 @@ Creates and records a [custom event](/docs/insights/new-relic-insights/adding-qu - Required. The type of event. Do not use `$eventType` to name your custom events; use a custom attribute or the optional `name` parameter. + Use this parameter to name the event. 
Using this parameter is equivalent to creating a `name` parameter. + + + `attributes` + + + + `Dictionary` + + + + Optional. A dictionary of key-value pairs that can be used to provide additional information about the custom event. + + + + + +## Return values [#return-values] + +Returns `true` if the event is recorded successfully, or `false` if not. + +## Example [#example] + +``` csharp + Dictionary dic = new Dictionary(); + dic.Add("Unity Custom Attribute", "Data2"); + + NewRelicAgent.RecordCustomEvent("Unity Custom Event Example", dic); +``` + + + + +## Syntax [#syntax] + +```csharp +RecordCustomEvent(string name, Dictionary attributes): bool; +``` + +## Description [#description] + +Creates and records a [custom event](/docs/insights/new-relic-insights/adding-querying-data/custom-attributes-events-new-relic-mobile#What-are-events-in-New-Relic-Mobile), for use in NRQL. + +## Parameters [#parameters] + + + + + + + + + + + + + @@ -876,23 +938,19 @@ Returns `true` if the event is recorded successfully, or `false` if not. ## Example [#example] ``` csharp - CrossNewRelicClient.Current.RecordCustomEvent("XamarinCustomEvent", "XamarinCustomEventCategory", new Dictionary() - { - {"BreadNumValue", 12.3 }, - {"BreadStrValue", "XamBread" }, - {"BreadBoolValue", true } - } - ); + Dictionary dic = new Dictionary(); + dic.Add("Unity Custom Attribute", "Data2"); + + NewRelicAgent.RecordCustomEvent("Unity Custom Event Example", dic); ``` - - + ## Syntax [#syntax] ```csharp -RecordCustomEvent(string name, Dictionary attributes): bool; +RecordCustomEvent(string eventType, string eventName, Dictionary attributes): bool; ``` ## Description [#description] @@ -921,7 +979,7 @@ Creates and records a [custom event](/docs/insights/new-relic-insights/adding-qu + + + + + + + + @@ -956,10 +1028,13 @@ Returns `true` if the event is recorded successfully, or `false` if not. 
## Example [#example] ``` csharp - Dictionary dic = new Dictionary(); - dic.Add("Unity Custom Attribute", "Data2"); - - NewRelicAgent.RecordCustomEvent("Unity Custom Event Example", dic); + CrossNewRelicClient.Current.RecordCustomEvent("XamarinCustomEvent", "XamarinCustomEventCategory", new Dictionary() + { + {"BreadNumValue", 12.3 }, + {"BreadStrValue", "XamBread" }, + {"BreadBoolValue", true } + } + ); ``` diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/record-custom-metrics.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/record-custom-metrics.mdx index 5cb262ce958..f80badc0fe7 100644 --- a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/record-custom-metrics.mdx +++ b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/record-custom-metrics.mdx @@ -35,12 +35,12 @@ freshnessValidatedDate: 2023-07-20 React Native - - Xamarin - Unity + + Xamarin + @@ -998,14 +998,14 @@ Supported measurements for `countUnit` and `valueUnit` are: ``` - + ## Syntax [#syntax] ```csharp -RecordMetric(string name, string category) : void; -RecordMetric(string name, string category, double value) : void; -RecordMetric(string name, string category, double value, MetricUnit countUnit, MetricUnit valueUnit) : void; +RecordMetricWithName(string name, string category) : void; +RecordMetricWithName(string name, string category, double value) : void; +RecordMetricWithName(string name, string category, double value, MetricUnit countUnit, MetricUnit valueUnit) : void; ``` ## Description [#description] @@ -1114,21 +1114,20 @@ Supported measurements for `countUnit` and `valueUnit` are: ## Example [#example] ``` csharp - CrossNewRelicClient.Current.RecordMetric("Agent start", "Lifecycle"); - CrossNewRelicClient.Current.RecordMetric("Login Auth Metric", "Network", 78.9); - CrossNewRelicClient.Current.RecordMetric("Request Metric", "Network", 20, MetricUnit.SECONDS, MetricUnit.OPERATIONS); + 
NewRelicAgent.RecordMetricWithName('UnityCustomMetricName', 'UnityCustomMetricCategory'); + NewRelicAgent.RecordMetricWithName('UnityCustomMetricName', 'UnityCustomMetricCategory', 12); + NewRelicAgent.RecordMetricWithName('UnityCustomMetricName', 'UnityCustomMetricCategory', 13, NewRelicAgent.MetricUnit.PERCENT, NewRelicAgent.MetricUnit.SECONDS); ``` - - + ## Syntax [#syntax] ```csharp -RecordMetricWithName(string name, string category) : void; -RecordMetricWithName(string name, string category, double value) : void; -RecordMetricWithName(string name, string category, double value, MetricUnit countUnit, MetricUnit valueUnit) : void; +RecordMetric(string name, string category) : void; +RecordMetric(string name, string category, double value) : void; +RecordMetric(string name, string category, double value, MetricUnit countUnit, MetricUnit valueUnit) : void; ``` ## Description [#description] @@ -1237,9 +1236,9 @@ Supported measurements for `countUnit` and `valueUnit` are: ## Example [#example] ``` csharp - NewRelicAgent.RecordMetricWithName('UnityCustomMetricName', 'UnityCustomMetricCategory'); - NewRelicAgent.RecordMetricWithName('UnityCustomMetricName', 'UnityCustomMetricCategory', 12); - NewRelicAgent.RecordMetricWithName('UnityCustomMetricName', 'UnityCustomMetricCategory', 13, NewRelicAgent.MetricUnit.PERCENT, NewRelicAgent.MetricUnit.SECONDS); + CrossNewRelicClient.Current.RecordMetric("Agent start", "Lifecycle"); + CrossNewRelicClient.Current.RecordMetric("Login Auth Metric", "Network", 78.9); + CrossNewRelicClient.Current.RecordMetric("Request Metric", "Network", 20, MetricUnit.SECONDS, MetricUnit.OPERATIONS); ``` diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/record-handled-exceptions.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/record-handled-exceptions.mdx index 8a77da24150..bb3f26a12ac 100644 --- a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/record-handled-exceptions.mdx +++ 
b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/record-handled-exceptions.mdx @@ -26,13 +26,13 @@ freshnessValidatedDate: 2023-07-20 .NET MAUI - - - Xamarin Unity + + Xamarin + @@ -406,7 +406,7 @@ Returns `true` if the event is recorded successfully, or `false` if not. ``` - + ## Syntax [#syntax] @@ -464,12 +464,11 @@ Returns `true` if the event is recorded successfully, or `false` if not. try { some_code_that_throws_error(); } catch (Exception ex) { - CrossNewRelicClient.Current.RecordException(ex); + NewRelicAgent.RecordException(e); } ``` - - + ## Syntax [#syntax] @@ -527,9 +526,10 @@ Returns `true` if the event is recorded successfully, or `false` if not. try { some_code_that_throws_error(); } catch (Exception ex) { - NewRelicAgent.RecordException(e); + CrossNewRelicClient.Current.RecordException(ex); } ``` + \ No newline at end of file diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/remove-all-attributes.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/remove-all-attributes.mdx index 7f30747bd42..03d3f88a3cb 100644 --- a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/remove-all-attributes.mdx +++ b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/remove-all-attributes.mdx @@ -35,13 +35,13 @@ freshnessValidatedDate: 2023-07-21 React Native - - - Xamarin Unity + + Xamarin + @@ -209,7 +209,7 @@ This method removes all attributes from a session. If you want to remove a singl ``` - + ## Syntax [#syntax] @@ -222,15 +222,14 @@ RemoveAllAttributes() : bool; This method removes all attributes from a session. If you want to remove a single session attribute, see [Remove an attribute](/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/remove-all-attributes). 
- ## Example [#example] ``` csharp - CrossNewRelicClient.Current.RemoveAllAttributes(); + NewRelicAgent.RemoveAllAttributes(); ``` - + ## Syntax [#syntax] @@ -243,10 +242,11 @@ RemoveAllAttributes() : bool; This method removes all attributes from a session. If you want to remove a single session attribute, see [Remove an attribute](/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/remove-all-attributes). + ## Example [#example] ``` csharp - NewRelicAgent.RemoveAllAttributes(); + CrossNewRelicClient.Current.RemoveAllAttributes(); ``` diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/remove-attribute.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/remove-attribute.mdx index 4b6ec5e3926..61c436bd988 100644 --- a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/remove-attribute.mdx +++ b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/remove-attribute.mdx @@ -39,11 +39,11 @@ freshnessValidatedDate: 2023-07-21 React Native + + Unity + Xamarin - - - Unity @@ -482,7 +482,7 @@ This method removes the attribute specified by the name string. If you want to r ``` - + ## Syntax [#syntax] @@ -534,11 +534,11 @@ This method removes the attribute specified by the name string. If you want to r ## Example [#example] ``` csharp - CrossNewRelicClient.Current.RemoveAttribute("XamarinNumAttr"); + NewRelicAgent.RemoveAttribute("UnityCustomAttrNumber"); ``` - + ## Syntax [#syntax] @@ -590,7 +590,7 @@ This method removes the attribute specified by the name string. 
If you want to r ## Example [#example] ``` csharp - NewRelicAgent.RemoveAttribute("UnityCustomAttrNumber"); + CrossNewRelicClient.Current.RemoveAttribute("XamarinNumAttr"); ``` diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-custom-user-id.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-custom-user-id.mdx index 4ff74663d57..5940692cbb3 100644 --- a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-custom-user-id.mdx +++ b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-custom-user-id.mdx @@ -38,13 +38,13 @@ freshnessValidatedDate: 2023-07-20 React Native - - - Xamarin Unity + + Xamarin + @@ -509,7 +509,7 @@ Set a custom user identifier value to associate user sessions with analytics eve ``` - + ## Syntax [#syntax] @@ -561,11 +561,11 @@ Set a custom user identifier value to associate user sessions with analytics eve ## Example [#example] ``` csharp - CrossNewRelicClient.Current.SetUserId("User123"); + NewRelicAgent.SetUserId("User123"); ``` - + ## Syntax [#syntax] @@ -617,7 +617,7 @@ Set a custom user identifier value to associate user sessions with analytics eve ## Example [#example] ``` csharp - NewRelicAgent.SetUserId("User123"); + CrossNewRelicClient.Current.SetUserId("User123"); ``` diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-max-event-buffer-time.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-max-event-buffer-time.mdx index 4c087538028..2813997d3bc 100644 --- a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-max-event-buffer-time.mdx +++ b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-max-event-buffer-time.mdx @@ -38,13 +38,13 @@ freshnessValidatedDate: 2023-07-20 React Native - - - Xamarin Unity + + Xamarin + @@ -512,7 +512,7 @@ Sets the event harvest cycle length. Default is 600 seconds (10 minutes). 
Minimu ``` - + ## Syntax [#syntax] @@ -564,11 +564,11 @@ Sets the event harvest cycle length. Default is 600 seconds (10 minutes). Minimu ## Example [#example] ``` csharp - CrossNewRelicClient.Current.SetMaxEventBufferTime(200); + NewRelicAgent.SetMaxEventBufferTime(200); ``` - + ## Syntax [#syntax] @@ -620,7 +620,7 @@ Sets the event harvest cycle length. Default is 600 seconds (10 minutes). Minimu ## Example [#example] ``` csharp - NewRelicAgent.SetMaxEventBufferTime(200); + CrossNewRelicClient.Current.SetMaxEventBufferTime(200); ``` diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-max-event-pool-size.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-max-event-pool-size.mdx index ad259a7f4e8..001971a70a3 100644 --- a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-max-event-pool-size.mdx +++ b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-max-event-pool-size.mdx @@ -41,12 +41,12 @@ freshnessValidatedDate: 2023-07-20 React Native - - Xamarin - Unity + + Xamarin + @@ -492,7 +492,7 @@ Sets the maximum size of the event pool stored in memory until the next harvest ``` - + ## Syntax [#syntax] @@ -544,12 +544,11 @@ Sets the maximum size of the event pool stored in memory until the next harvest ## Example [#example] ``` csharp - CrossNewRelicClient.Current.SetMaxEventPoolSize(1500); + NewRelicAgent.SetMaxEventPoolSize(1500); ``` - - + ## Syntax [#syntax] @@ -601,9 +600,10 @@ Sets the maximum size of the event pool stored in memory until the next harvest ## Example [#example] ``` csharp - NewRelicAgent.SetMaxEventPoolSize(1500); + CrossNewRelicClient.Current.SetMaxEventPoolSize(1500); ``` + diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-max-offline-storage.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-max-offline-storage.mdx index 62c4254a7d6..6d99f89e79f 100644 --- 
a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-max-offline-storage.mdx +++ b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/set-max-offline-storage.mdx @@ -40,25 +40,25 @@ This method controls the maximum amount of offline storage that can be stored in React Native - - Xamarin - Unity + + Xamarin + -Offline storage is enabled by default. To enable it, add the following feature flag: +Offline storage is disabled by default. To enable it, add the following feature flag: ```java NewRelic.disableFeature(FeatureFlag.OfflineStorage) ``` -For details on adding feature flags, see [Configure mobile monitoring settings](/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/configure-settings/). +For details on adding feature flags, see [Configure mobile monitoring settings](/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/configure-settings/#android). ## Syntax [#syntax] @@ -135,7 +135,7 @@ Offline storage is disabled by default. To enable it, add the following feature NewRelic.enableFeatures(NRMAFeatureFlags.NRFeatureFlag_OfflineStorage) ``` -For details on adding feature flags, see [Configure mobile monitoring settings](/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/configure-settings). +For details on adding feature flags, see [Configure mobile monitoring settings](/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/configure-settings/#ios). ## Syntax [#syntax] @@ -205,8 +205,14 @@ let poolSizeSet = NewRelic.setMaxOfflineStorageSize(1000) - -Offline storage is enabled by default but can be disabled with a feature flag using the agent configuration. + +Offline storage is enabled by default. If you need to disable it, add the following feature flag: + +```typescript +offlineStorageEnabled: false +``` + +For details on adding feature flags, see [Configure mobile monitoring settings](/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/configure-settings/#capacitor). 
## Syntax [#syntax] @@ -261,15 +267,20 @@ setMaxOfflineStorageSize(options: { megabytes: number; }) => void - -Offline storage is enabled by default but can be disabled with a feature flag using the agent configuration. + +Offline storage is enabled by default. If you need to disable it, add the following feature flag: + +```typescript +OFFLINE_STORAGE_ENABLED = "false" +``` + +For details on adding feature flags, see [Configure mobile monitoring settings](/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/configure-settings/#cordova). ## Syntax [#syntax] ```typescript setMaxOfflineStorageSize(megabytes: number): void; - ``` @@ -319,6 +330,16 @@ setMaxOfflineStorageSize(megabytes: number): void; + +Offline storage is enabled by default. If you need to disable it, add the following feature flag: + +```csharp +offlineStorageEnabled: false +``` + +For details on adding feature flags, see [Configure mobile monitoring settings](/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/configure-settings/#maui). + + ## Syntax [#syntax] ```csharp @@ -371,6 +392,16 @@ setMaxOfflineStorageSize(int megabytes): void; + +Offline storage is enabled by default. If you need to disable it, add the following feature flag: + +```typescript +offlineStorageEnabled: false +``` + +For details on adding feature flags, see [Configure mobile monitoring settings](/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/configure-settings/#flutter). + + ## Syntax [#syntax] ```typescript @@ -423,6 +454,16 @@ NewrelicMobile.instance.setMaxOfflineStorageSize(200); + +Offline storage is enabled by default. If you need to disable it, add the following feature flag: + +```typescript +offlineStorageEnabled: false +``` + +For details on adding feature flags, see [Configure mobile monitoring settings](/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/configure-settings/#react). 
+ + ## Syntax [#syntax] ```js @@ -474,14 +515,19 @@ setMaxOfflineStorageSize(megabytes: number): void; ``` - + + + +Offline storage is enabled by default. If you need to disable it, see [Configure mobile monitoring settings](/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/configure-settings/#unity). + ## Syntax [#syntax] ```csharp -SetMaxOfflineStorageSize(int megabytes): void; +SetMaxOfflineStorageSize(int maxOfflineStorageSize): void; ``` + ## Parameters [#parameters]
+ Parameter + + Type + + Description +
- `eventName` + `name` @@ -849,7 +911,7 @@ Creates and records a [custom event](/docs/insights/new-relic-insights/adding-qu - Optional. Use this parameter to name the event. Using this parameter is equivalent to creating a `name` parameter. + Use this parameter to name the event. Using this parameter is equivalent to creating a `name` parameter.
- `name` + `eventType` @@ -929,7 +987,21 @@ Creates and records a [custom event](/docs/insights/new-relic-insights/adding-qu - Use this parameter to name the event. Using this parameter is equivalent to creating a `name` parameter. + Required. The type of event. Do not use `$eventType` to name your custom events; use a custom attribute or the optional `name` parameter. +
+ `eventName` + + `string` + + Optional. Use this parameter to name the event. Using this parameter is equivalent to creating a `name` parameter.
@@ -505,7 +551,7 @@ SetMaxOfflineStorageSize(int megabytes): void; @@ -522,20 +568,27 @@ SetMaxOfflineStorageSize(int megabytes): void; ## Example [#example] ``` csharp - CrossNewRelicClient.Current.SetMaxOfflineStorageSize(200); + NewRelicAgent.SetMaxOfflineStorageSize(200); ``` - + - + +Offline storage is enabled by default. If you need to disable it, add the following feature flag: + +```typescript +offlineStorageEnabled: false +``` + +For details on adding feature flags, see [Configure mobile monitoring settings](/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/configure-settings/#xamarin). + ## Syntax [#syntax] ```csharp -SetMaxEventPoolSize(int maxPoolSize): void; +SetMaxOfflineStorageSize(int megabytes): void; ``` - ## Parameters [#parameters]
- `megabytes` + `maxOfflineStorageSize` @@ -513,7 +559,7 @@ SetMaxOfflineStorageSize(int megabytes): void; - Required. Maximum size in megaBytes that can be stored in the file system.. + Required. Maximum size in megaBytes that can be stored in the file system.
@@ -559,7 +612,7 @@ SetMaxEventPoolSize(int maxPoolSize): void; @@ -576,8 +629,9 @@ SetMaxEventPoolSize(int maxPoolSize): void; ## Example [#example] ``` csharp - NewRelicAgent.SetMaxOfflineStorageSize(200); + CrossNewRelicClient.Current.SetMaxOfflineStorageSize(200); ``` + diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/start-interaction.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/start-interaction.mdx index 12faca7c09b..8538b2e2a13 100644 --- a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/start-interaction.mdx +++ b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/start-interaction.mdx @@ -42,12 +42,12 @@ freshnessValidatedDate: 2023-07-20 React Native - - Xamarin - Unity + + Xamarin + @@ -577,12 +577,12 @@ Track a method as an interaction. ``` - + ## Syntax [#syntax] ```csharp -StartInteraction(string interactionName): string; +StartInteractionWithName(string interactionName): string; ``` ## Description [#description] @@ -627,31 +627,24 @@ Track a method as an interaction. 
## Example [#example] ``` csharp - HttpClient myClient = new HttpClient(CrossNewRelicClient.Current.GetHttpMessageHandler()); - - string interactionId = CrossNewRelicClient.Current.StartInteraction("Getting data from service"); + string interActionId = NewRelicAgent.StartInteractionWithName("Unity InterAction Example"); - var response = await myClient.GetAsync(new Uri("https://jsonplaceholder.typicode.com/todos/1")); - if (response.IsSuccessStatusCode) - { - var content = await response.Content.ReadAsStringAsync(); - } else - { - Console.WriteLine("Unsuccessful response code"); - } + for(int i =0; i < 4;i++) + { + Thread.Sleep(1000); + } - CrossNewRelicClient.Current.EndInteraction(interactionId); + NewRelicAgent.StopCurrentInteraction(interActionId); ``` - - + ## Syntax [#syntax] ```csharp -StartInteractionWithName((string interactionName): string; +StartInteraction(string interactionName): string; ``` ## Description [#description] @@ -696,14 +689,20 @@ Track a method as an interaction. 
## Example [#example] ``` csharp - string interActionId = NewRelicAgent.StartInteractionWithName("Unity InterAction Example"); + HttpClient myClient = new HttpClient(CrossNewRelicClient.Current.GetHttpMessageHandler()); + + string interactionId = CrossNewRelicClient.Current.StartInteraction("Getting data from service"); - for(int i =0; i < 4;i++) - { - Thread.Sleep(1000); - } + var response = await myClient.GetAsync(new Uri("https://jsonplaceholder.typicode.com/todos/1")); + if (response.IsSuccessStatusCode) + { + var content = await response.Content.ReadAsStringAsync(); + } else + { + Console.WriteLine("Unsuccessful response code"); + } - NewRelicAgent.StopCurrentInteraction(interActionId); + CrossNewRelicClient.Current.EndInteraction(interactionId); ``` diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/stop-interaction.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/stop-interaction.mdx index a826b4cb07e..367e1cc9e29 100644 --- a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/stop-interaction.mdx +++ b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/stop-interaction.mdx @@ -38,13 +38,13 @@ freshnessValidatedDate: 2023-07-21 React Native - - - Xamarin Unity + + Xamarin + @@ -543,12 +543,12 @@ This uses the string ID for the interaction you want to end. This string is retu ``` - + ## Syntax [#syntax] ```csharp -EndInteraction(string interactionId): void; +StopCurrentInteraction(string interactionId): void; ``` ## Description [#description] @@ -594,30 +594,23 @@ This uses the string ID for the interaction you want to end. 
This string is retu ## Example [#example] ``` csharp - HttpClient myClient = new HttpClient(CrossNewRelicClient.Current.GetHttpMessageHandler()); - - string interactionId = CrossNewRelicClient.Current.StartInteraction("Getting data from service"); + string interActionId = NewRelicAgent.StartInteractionWithName("Unity InterAction Example"); - var response = await myClient.GetAsync(new Uri("https://jsonplaceholder.typicode.com/todos/1")); - if (response.IsSuccessStatusCode) - { - var content = await response.Content.ReadAsStringAsync(); - } else - { - Console.WriteLine("Unsuccessful response code"); - } + for(int i =0; i < 4;i++) + { + Thread.Sleep(1000); + } - CrossNewRelicClient.Current.EndInteraction(interactionId); + NewRelicAgent.StopCurrentInteraction(interActionId); ``` - - + ## Syntax [#syntax] ```csharp -StopCurrentInteraction(string interactionId): void; +EndInteraction(string interactionId): void; ``` ## Description [#description] @@ -663,16 +656,23 @@ This uses the string ID for the interaction you want to end. 
This string is retu ## Example [#example] ``` csharp - string interActionId = NewRelicAgent.StartInteractionWithName("Unity InterAction Example"); + HttpClient myClient = new HttpClient(CrossNewRelicClient.Current.GetHttpMessageHandler()); + + string interactionId = CrossNewRelicClient.Current.StartInteraction("Getting data from service"); - for(int i =0; i < 4;i++) - { - Thread.Sleep(1000); - } + var response = await myClient.GetAsync(new Uri("https://jsonplaceholder.typicode.com/todos/1")); + if (response.IsSuccessStatusCode) + { + var content = await response.Content.ReadAsStringAsync(); + } else + { + Console.WriteLine("Unsuccessful response code"); + } - NewRelicAgent.StopCurrentInteraction(interActionId); + CrossNewRelicClient.Current.EndInteraction(interactionId); ``` + diff --git a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/test-crash-reporting.mdx b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/test-crash-reporting.mdx index c9f9fd7658b..454d254d67f 100644 --- a/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/test-crash-reporting.mdx +++ b/src/content/docs/mobile-monitoring/new-relic-mobile/mobile-sdk/test-crash-reporting.mdx @@ -33,12 +33,12 @@ freshnessValidatedDate: 2023-07-20 React Native + + Unity + Xamarin - - Unity - @@ -412,7 +412,7 @@ Throws a demo run-time exception to test New Relic crash reporting. ``` - + ## Syntax [#syntax] @@ -422,7 +422,7 @@ CrashNow(string message = "") : void; ## Description [#description] -Throws a demo run-time exception on Android/iOS to test New Relic crash reporting. +Throws a demo run-time exception on Unity to test New Relic crash reporting. 
## Parameters [#parameters] @@ -464,11 +464,11 @@ Throws a demo run-time exception on Android/iOS to test New Relic crash reportin ## Example [#example] ``` csharp - CrossNewRelicClient.Current.CrashNow(); + NewRelicAgent.CrashNow("this is crash"); ``` - + ## Syntax [#syntax] @@ -478,7 +478,7 @@ CrashNow(string message = "") : void; ## Description [#description] -Throws a demo run-time exception on Unity to test New Relic crash reporting. +Throws a demo run-time exception on Android/iOS to test New Relic crash reporting. ## Parameters [#parameters] @@ -520,7 +520,7 @@ Throws a demo run-time exception on Unity to test New Relic crash reporting. ## Example [#example] ``` csharp - NewRelicAgent.CrashNow("this is crash"); + CrossNewRelicClient.Current.CrashNow(); ``` diff --git a/src/content/docs/more-integrations/open-source-telemetry-integrations/opentelemetry/collector/opentelemetry-collector-infra-hosts.mdx b/src/content/docs/more-integrations/open-source-telemetry-integrations/opentelemetry/collector/opentelemetry-collector-infra-hosts.mdx index 7e5ab9d9441..a5b12b6be6c 100644 --- a/src/content/docs/more-integrations/open-source-telemetry-integrations/opentelemetry/collector/opentelemetry-collector-infra-hosts.mdx +++ b/src/content/docs/more-integrations/open-source-telemetry-integrations/opentelemetry/collector/opentelemetry-collector-infra-hosts.mdx @@ -154,7 +154,7 @@ exporters: endpoint: OTLP_ENDPOINT_HERE headers: api-key: YOUR_KEY_HERE -logging: + logging: service: pipelines: diff --git a/src/content/docs/more-integrations/open-source-telemetry-integrations/opentelemetry/view-your-data/opentelemetry-errors-inbox-page.mdx b/src/content/docs/more-integrations/open-source-telemetry-integrations/opentelemetry/view-your-data/opentelemetry-errors-inbox-page.mdx index ef267f0fa79..8c113316dab 100644 --- a/src/content/docs/more-integrations/open-source-telemetry-integrations/opentelemetry/view-your-data/opentelemetry-errors-inbox-page.mdx +++ 
b/src/content/docs/more-integrations/open-source-telemetry-integrations/opentelemetry/view-your-data/opentelemetry-errors-inbox-page.mdx @@ -35,11 +35,11 @@ As span error events are ingested, we run the events through a set of managed ru Spans where `otel.status_code = ERROR` and the span kind is either `server` or `consumer` are treated as an individual error instance. Error groups are sets of unique error instances that share a unique fingerprint. The value of the error group message is determined in the following order: -* otel.status_description -* rpc.grpc.status_code -* http.status_code +* `otel.status_description` +* `rpc.grpc.status_code` +* `http.status_code` * `undefined` if all three above are not present Troubleshooting: In the event your inbox is too noisy, check out [these tips](/docs/errors-inbox/errors-inbox/#similar-events). If on the other hand, you’re not seeing errors data in Errors inbox, check out the [requirements](/docs/errors-inbox/errors-inbox#requirements) to get started. -To learn about OpenTelemetry in other UI pages, see the [UI overview](/docs/more-integrations/open-source-telemetry-integrations/opentelemetry/view-your-data/opentelemetry-view-your-data). \ No newline at end of file +To learn about OpenTelemetry in other UI pages, see the [UI overview](/docs/more-integrations/open-source-telemetry-integrations/opentelemetry/view-your-data/opentelemetry-view-your-data). 
diff --git a/src/content/docs/more-integrations/open-source-telemetry-integrations/statsd/statsd-monitoring-integration.mdx b/src/content/docs/more-integrations/open-source-telemetry-integrations/statsd/statsd-monitoring-integration.mdx index 3d8cc53e85c..d868a46d14f 100644 --- a/src/content/docs/more-integrations/open-source-telemetry-integrations/statsd/statsd-monitoring-integration.mdx +++ b/src/content/docs/more-integrations/open-source-telemetry-integrations/statsd/statsd-monitoring-integration.mdx @@ -628,6 +628,21 @@ SELECT count(*) FROM Metric WHERE metricName = 'myMetric' and environment = 'pro For more on how to query the `Metric` data type, see [Query metric data](/docs/data-ingest-apis/get-data-new-relic/metric-api/view-query-you-metric-data). +## Troubleshooting [#troubleshooting] + +**Problem**: + +You've followed the steps to run the StatsD integration but still don't see the expected metrics in New Relic. + +**Solutions**: + +Follow the steps below to troubleshoot your configuration: + * Ensure the configuration contains your 40 hexadecimal character license key, and it's a valid license for the selected New Relic account ID. +* Ensure the right data center, US or EU, has been selected for your New Relic account. Tip: If the license_key starts with "eu" then you must use the `NR_EU_REGION=true` flag. +* Ensure there are no [`NrIntegrationError`](/data-apis/ingest-apis/metric-api/troubleshoot-nrintegrationerror-events/) related to the StatsD integration. +* Verbose logs can be enabled using the environment variable `NR_STATSD_VERBOSE`, modify the docker run command adding the following variable: `-e NR_STATSD_VERBOSE=true`. +* A test metric can be pushed to confirm the integration is sending metrics that are expected. Example using the NetCat `nc` utility: + - `echo "example.gauge:123|g" | nc -u -w0 127.0.0.1 8125` (update `127.0.0.1` with running container IP/address). ## Check the source code [#source-code] This integration is open source software. 
That means you can [browse its source code](https://github.com/newrelic/nri-statsd/) and send improvements, or create your own fork and build it. diff --git a/src/content/docs/more-integrations/open-source-telemetry-integrations/wordpress/wordpress-fullstack-integration.mdx b/src/content/docs/more-integrations/open-source-telemetry-integrations/wordpress/wordpress-fullstack-integration.mdx index d37ad4e78e3..5bfbc0ec064 100644 --- a/src/content/docs/more-integrations/open-source-telemetry-integrations/wordpress/wordpress-fullstack-integration.mdx +++ b/src/content/docs/more-integrations/open-source-telemetry-integrations/wordpress/wordpress-fullstack-integration.mdx @@ -163,19 +163,19 @@ You can choose our pre-built dashboard template named `WordPress Full Stack` to Your WordPress dashboard is considered a custom dashboard and can be found in the **Dashboards** UI. For docs on using and editing dashboards, see [our dashboard docs](/docs/query-your-data/explore-query-data/dashboards/introduction-dashboards). 
-Here's an example NRQL query to check the delay for first input: +Here's an example NRQL query to check the delay for interaction to next paint (INP): ```sql SELECT percentage(count(*), - WHERE firstInputDelay < 100) + WHERE interactionToNextPaint < 200) -AS 'Good (<100ms)', +AS 'Good (<200ms)', percentage(count(*), - WHERE firstInputDelay >= 100 and firstInputDelay < 300) + WHERE interactionToNextPaint >= 200 and interactionToNextPaint < 500) -AS 'Needs improvement (>=100 <300ms)', +AS 'Needs improvement (>=200 <500ms)', percentage(count(*), - WHERE firstInputDelay >= 300) + WHERE interactionToNextPaint >= 500) -AS 'Poor (> 300ms)' +AS 'Poor (>= 500ms)' FROM PageViewTiming - WHERE firstInputDelay IS NOT NULL + WHERE interactionToNextPaint IS NOT NULL TIMESERIES AUTO ``` diff --git a/src/content/docs/network-performance-monitoring/advanced/advanced-config.mdx b/src/content/docs/network-performance-monitoring/advanced/advanced-config.mdx index 89c6e260b41..d4f79c43078 100644 --- a/src/content/docs/network-performance-monitoring/advanced/advanced-config.mdx +++ b/src/content/docs/network-performance-monitoring/advanced/advanced-config.mdx @@ -14,7 +14,7 @@ If you want to explore all the options you can use when configuring the monitori ## `snmp-base.yaml` sample file [#snmp-base-yml-template] -Here's an example of the various configuration options available in the `snmp-base.yaml` file used by the `ktranslate` docker image to poll for SNMP and flow data devices. You can also see a heavily-commented sample in the [KTranslate repository on GitHub](https://github.com/kentik/ktranslate/blob/main/config/snmp.yaml.sample). +Here's an example of the various configuration options available in the `snmp-base.yaml` file used by the `ktranslate` Docker image to poll for SNMP and flow data devices. You can also see a heavily-commented sample in the [KTranslate repository on GitHub](https://github.com/kentik/ktranslate/blob/main/config/snmp.yaml.sample). 
```yaml # Configuration of every device monitored by this container @@ -146,11 +146,12 @@ devices: - "Production" - "Guest" product_types: - - wireless + - appliance preferences: device_status_only: true - show_vpn_peers: true hide_uplink_usage: false + show_vpn_peers: true + show_network_attr: true # Configuration for receipt of SNMP Traps trap: listen: 0.0.0.0:1620 @@ -361,7 +362,7 @@ global: @@ -961,7 +962,7 @@ global: @@ -997,7 +998,7 @@ global: @@ -1011,7 +1012,7 @@ global: @@ -1025,7 +1026,7 @@ global: @@ -1073,7 +1074,19 @@ global: + + + + + + @@ -1085,6 +1098,10 @@ global: The network monitoring agent has built-in support for retrieving keys from [AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/), [Azure Key Vault](https://learn.microsoft.com/en-us/azure/key-vault/general/), and [GCP Secret Manager](https://cloud.google.com/secret-manager/docs). + + SNMPv1 and SNMPv2c do not support the use of cloud secrets as the protocols themselves send their community strings via plain text by default. If you are concerned about the security of your SNMP authentication, please update to use SNMPv3. + + - The [Meraki Dashboard API](https://developer.cisco.com/meraki/api/) integration pulls various metrics related to the health of your Meraki environment. The combination of various configuration options allows you to set up different monitoring scenarios for your needs. - - * `meraki_config.monitor_devices: true && meraki_config.preferences.device_status_only: true`: Uses the [Get Organization Device Statuses](https://developer.cisco.com/meraki/api/get-organization-devices-statuses/) endpoint to list the status of every Meraki device in the organization. 
- - NRQL to find device status telemetry: - - ```sql - FROM Metric SELECT - latest(status) AS 'Device Status' // Current status of this device - FACET - org_id AS 'Organization ID', - org_name AS 'Organization Name', - network_id AS 'Network ID', - network AS 'Network Name', - device_name AS 'Device Name', - src_addr AS 'Device Public IP', - mac AS 'Device MAC', - model AS 'Device Model', - serial AS 'Device Serial', - address AS 'Device Address', - lat AS 'Device Latitude', - lng AS 'Device Longitude', - notes AS 'Device Notes' - WHERE instrumentation.name = 'meraki.device_status' - ``` - -
- - * `meraki_config.monitor_org_changes: true`: Uses the [Get Organization Configuration Changes](https://developer.cisco.com/meraki/api/get-organization-configuration-changes/) endpoint to view the change log for the organization. - - NRQL to find organization configuration change telemetry: - - ```sql - FROM KExtEvent SELECT * - ``` - -
- - * `meraki_config.monitor_uplinks: true && meraki_config.preferences.hide_uplink_usage: false`: Uses both the [Get Organization Uplinks Statuses](https://developer.cisco.com/meraki/api/get-organization-uplinks-statuses/) and [Get Organization Appliance Uplinks Usage by Network](https://developer.cisco.com/meraki/api/get-organization-appliance-uplinks-usage-by-network/) endpoints to list the uplink status and performance of every Meraki MX, MG and Z series device in the organization. - - NRQL to find device uplink telemetry: - - ```sql - FROM Metric SELECT - max(kentik.meraki.uplinks.LatencyMS) AS 'Uplink Latency', // Uplink measured latency in milliseconds - max(kentik.meraki.uplinks.LossPct) AS 'Uplink Loss %', // Uplink measured loss percentage - max(kentik.meraki.uplinks.Recv) AS 'Uplink Receive Bytes', // Uplink bytes received - max(kentik.meraki.uplinks.Sent) AS 'Uplink Transmit Bytes', // Uplink bytes sent - latest(status) AS 'Uplink Status' // Latest status of the uplink - FACET - org_id AS 'Organization ID', - org_name AS 'Organization Name', - network_id AS 'Network ID', - network AS 'Network Name', - device_name AS 'Device Name', - interface AS 'Device Uplink Interface', - model AS 'Device Model', - serial AS 'Device Serial' - WHERE org_id IS NOT NULL - ``` - -
- - * `meraki_config.monitor_uplinks: true && meraki_config.preferences.hide_uplink_usage: true`: Uses the [Get Organization Uplinks Statuses](https://developer.cisco.com/meraki/api/get-organization-uplinks-statuses/) endpoint to list only the uplink status of every Meraki MX, MG and Z series device in the organization. - - NRQL to find device uplink status telemetry: - - ```sql - FROM Metric SELECT - latest(status) AS 'Uplink Status' // Latest status of the uplink - FACET - org_id AS 'Organization ID', - org_name AS 'Organization Name', - network_id AS 'Network ID', - network AS 'Network Name', - device_name AS 'Device Name', - interface AS 'Device Uplink Interface', - model AS 'Device Model', - serial AS 'Device Serial' - WHERE org_id IS NOT NULL - ``` - -
- - * `meraki_config.monitor_vpn_status: true && meraki_config.preferences.show_vpn_peers: false`: Uses the [Get Organization Appliance VPN Statuses](https://developer.cisco.com/meraki/api/get-organization-appliance-vpn-statuses/) endpoint the show VPN statuses across the networks in the organization. - - NRQL to find VPN status telemetry: - - ```sql - FROM Metric SELECT - latest(status) AS 'VPN Status' // Latest status of this VPN - FACET - org_id AS 'Organization ID', - org_name AS 'Organization Name', - network_id AS 'Network ID', - network AS 'Network Name', - device_name AS 'Device Name', - serial AS 'Device Serial', - vpn_mode AS 'VPN Mode', - wan1 OR wan2 AS 'WAN Interface IP' - WHERE instrumentation.name = 'meraki.vpn_status' - AND org_id IS NOT NULL - ``` - -
- - * `meraki_config.monitor_vpn_status: true && meraki_config.preferences.show_vpn_peers: true`: Uses the [Get Organization Appliance VPN Statuses](https://developer.cisco.com/meraki/api/get-organization-appliance-vpn-statuses/) endpoint to add information about VPN peers across the networks in the organization. - - NRQL to find VPN peers telemetry: - - ```sql - FROM Metric SELECT - latest(status) AS 'Peer Status' // Current status of this VPN peer - FACET - network_id AS 'Network ID', - network AS 'Network Name', - device_name AS 'Device Name', - serial AS 'Device Serial', - vpn_mode AS 'VPN Mode', - wan1 AS 'WAN 1 IP', - wan2 AS 'WAN 2 IP', - peer_name AS 'Peer Name', // Name of this peer - peer_reachability AS 'Peer Reachability', // Latest results of reachability test for this peer - peer_network_id AS 'Peer Network ID', // Network ID for this peer - peer_type AS 'Peer Type' // Type of Peer (Meraki vs Third-party) - WHERE metricName = 'kentik.meraki.vpn_status.PeerStatus' - ``` - - ### Primary configuration options - - - You can use the [KENTIK_MERAKI_API_KEY](/docs/network-performance-monitoring/advanced/ktranslate-container-management) environment variable to pass your API key into the Meraki integration without storing it in plain text on your configuration file. - - -
- `maxPoolSize` + `megabytes` @@ -567,7 +620,7 @@ SetMaxEventPoolSize(int maxPoolSize): void; - Required. Maximum size in megaBytes that can be stored in the file system.. + Required. Maximum size in megaBytes that can be stored in the file system. 
- Timestamp when this device was last discovered by the `ktranslate` docker image. This field is informational. + Timestamp when this device was last discovered by the `ktranslate` Docker image. This field is informational.
- Indicates whether to drop all values from this cycle if polling takes longer than the value set in `poll_time_sec`. By default, it's set to `false` + Indicates whether to drop all values from this cycle if polling takes longer than the value set in `poll_time_sec`. By default, it's set to `false`.
- Array of all active MIBs the `ktranslate` docker image will poll. This list is automatically generated during discovery if the `discovery_add_mibs` attribute is `true`. MIBs not listed here will not be polled on any device in the configuration file. You can specify a SNMP table directly in a MIB file using `MIB-NAME.tableName` syntax. Ex: `HOST-RESOURCES-MIB.hrProcessorTable`. + Array of all active MIBs the `ktranslate` Docker image will poll. This list is automatically generated during discovery if the `discovery_add_mibs` attribute is `true`. MIBs not listed here will not be polled on any device in the configuration file. You can specify a SNMP table directly in a MIB file using `MIB-NAME.tableName` syntax. Ex: `HOST-RESOURCES-MIB.hrProcessorTable`.
- Time in milliseconds SNMP queries timeout. This can be overridden per device using the `devices..timeout_ms` attribute. By default, it's set to `3000` + Time in milliseconds SNMP queries timeout. This can be overridden per device using the `devices..timeout_ms` attribute. By default, it's set to `3000`.
- Number of attempts to retry failed SNMP polls. This can be overridden per device using the `devices..retries` attribute. By default, it's set to `0` + Number of attempts to retry failed SNMP polls. This can be overridden per device using the `devices..retries` attribute. By default, it's set to `0`.
- Removes devices from config file after X scheduled discovery jobs have failed. Set this to `-1` to keep devices forever, or any integer >= `1` to set up a purge threshold. (Default: `0`) + Removes devices from config file after X scheduled discovery jobs have failed. Set this to `-1` to keep devices forever, or any integer >= `1` to set up a purge threshold. By default, it's set to `0`. +
+ watch_profile_changes + + + + Sets up a watcher to reload SNMP threads on changes to profiles in the `mib_profile_dir` path. By default, it's set to `false`.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Key name Required Input Description
meraki_config.api_key ✓ (Not required if using the [KENTIK_MERAKI_API_KEY](/docs/network-performance-monitoring/advanced/ktranslate-container-management) environment variable) API Key (string) [Meraki Dashboard API key](https://documentation.meraki.com/General_Administration/Other_Topics/Cisco_Meraki_Dashboard_API#Enable_API_Access) for authentication.
meraki_config.max_http_retry - Integer between 1-10 (Default: 2) Optional setting that controls how often a retry is attempted on API requests that return a `HTTP 429` error. The interval between retries is 5 seconds.
meraki_config.monitor_devices - true | false (Default: false) Monitor the status of every Meraki device in the organization.
meraki_config.monitor_org_changes - true | false (Default: false) Monitors the change log for the organization.
meraki_config.monitor_uplinks - true | false (Default: true) Monitors the uplink status and performance of every Meraki MX, MG and Z series device in the organization.
meraki_config.monitor_vpn_status - true | false (Default: false) Monitors the VPN statuses across the networks in the organization.
- - ### Filtering options - - These options allow you to restrict monitoring to specifically targeted objects in your Meraki environment. - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Key name Required Input Description
meraki_config.organizations - Regex in [RE2 syntax](https://github.com/google/re2/wiki/Syntax) (Default: null) Filters all monitoring to a specific list of organizations.
meraki_config.networks - Regex in [RE2 syntax](https://github.com/google/re2/wiki/Syntax) (Default: null) Filters all monitoring to a specific list of networks.
meraki_config.product_types - Valid types are wireless, appliance, switch, systemsManager, camera, cellularGateway, sensor, and cloudGateway. (Default: null) Adds parameters to the [monitor_devices](https://developer.cisco.com/meraki/api/get-organization-devices-statuses/) API request to filter on specific types of devices.
- - ### Additional preferences - - These options allow you to further define the data collected from the main configuration options. Various combinations are described in the examples section above. - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Key name Required Input Description
meraki_config.preferences.device_status_only - true | false (Default: false) *Required* when using `monitor_devices: true` to restrict polling to only status information. **(This is used to prevent timeout issues.)**
meraki_config.preferences.hide_uplink_usage - true | false (Default: false) Used in combination with `monitor_uplinks` to remove performance metrics and only return status information for uplinks.
meraki_config.preferences.show_vpn_peers - true | false (Default: false) Used in combination with `monitor_vpn_status` to add telemetry on VPN peers.
- - ### Minimum configuration example + The [Meraki Dashboard API](https://developer.cisco.com/meraki/api/) integration pulls various metrics related to the health of your Meraki environment. The combination of configuration options allows you to set up different monitoring scenarios for your needs and creates entities in your New Relic account. + + + + + Organization metrics are collected by default under the `kentik.meraki.organization.Count` metric which is exclusively used to generate the `Meraki Organization` entity. This is mainly to enable visualization of the Meraki hierarchy to align networks and devices to their parent organization. + + * `meraki_config.monitor_org_changes: true`: Uses the [Get Organization Configuration Changes](https://developer.cisco.com/meraki/api/get-organization-configuration-changes/) endpoint to view the change log for the organization. + + NRQL to find organization configuration change telemetry: + + ```sql + FROM KExtEvent SELECT * + ``` + + + + + * `meraki_config.preferences.show_network_attr: true` + + Network metrics are collected under the `kentik.meraki.network.Count` metric which is exclusively used to generate the `Meraki Network` entity. This is mainly to enable visualization of the Meraki hierarchy and align the devices to the network they are members of. + + + + + + * `meraki_config.monitor_devices: true && meraki_config.preferences.device_status_only: true`: Uses the [Get Organization Device Statuses](https://developer.cisco.com/meraki/api/get-organization-devices-statuses/) endpoint to list the status of every Meraki device in the organization. 
+ + NRQL to find device status telemetry: + + ```sql + FROM Metric SELECT + latest(status) AS 'Device Status' // Current status of this device + FACET + org_id AS 'Organization ID', + org_name AS 'Organization Name', + network_id AS 'Network ID', + network AS 'Network Name', + device_name AS 'Device Name', + src_addr AS 'Device Public IP', + mac AS 'Device MAC', + model AS 'Device Model', + serial AS 'Device Serial', + address AS 'Device Address', + lat AS 'Device Latitude', + lng AS 'Device Longitude', + notes AS 'Device Notes' + WHERE instrumentation.name = 'meraki.device_status' + ``` + +
+ + * `meraki_config.monitor_uplinks: true && meraki_config.preferences.hide_uplink_usage: false`: Uses both the [Get Organization Uplinks Statuses](https://developer.cisco.com/meraki/api/get-organization-uplinks-statuses/) and [Get Organization Appliance Uplinks Usage by Network](https://developer.cisco.com/meraki/api/get-organization-appliance-uplinks-usage-by-network/) endpoints to list the uplink status and performance of every Meraki MX, MG and Z series device in the organization. + + NRQL to find device uplink telemetry: + + ```sql + FROM Metric SELECT + max(kentik.meraki.uplinks.LatencyMS) AS 'Uplink Latency', // Uplink measured latency in milliseconds + max(kentik.meraki.uplinks.LossPct) AS 'Uplink Loss %', // Uplink measured loss percentage + max(kentik.meraki.uplinks.Recv) AS 'Uplink Receive Bytes', // Uplink bytes received + max(kentik.meraki.uplinks.Sent) AS 'Uplink Transmit Bytes', // Uplink bytes sent + latest(status) AS 'Uplink Status' // Latest status of the uplink + FACET + org_id AS 'Organization ID', + org_name AS 'Organization Name', + network_id AS 'Network ID', + network AS 'Network Name', + device_name AS 'Device Name', + interface AS 'Device Uplink Interface', + model AS 'Device Model', + serial AS 'Device Serial' + WHERE org_id IS NOT NULL + ``` + +
+ + * `meraki_config.monitor_uplinks: true && meraki_config.preferences.hide_uplink_usage: true`: Uses the [Get Organization Uplinks Statuses](https://developer.cisco.com/meraki/api/get-organization-uplinks-statuses/) endpoint to list only the uplink status of every Meraki MX, MG and Z series device in the organization. + + NRQL to find device uplink status telemetry: + + ```sql + FROM Metric SELECT + latest(status) AS 'Uplink Status' // Latest status of the uplink + FACET + org_id AS 'Organization ID', + org_name AS 'Organization Name', + network_id AS 'Network ID', + network AS 'Network Name', + device_name AS 'Device Name', + interface AS 'Device Uplink Interface', + model AS 'Device Model', + serial AS 'Device Serial' + WHERE org_id IS NOT NULL + ``` + +
+ + * `meraki_config.monitor_vpn_status: true && meraki_config.preferences.show_vpn_peers: false`: Uses the [Get Organization Appliance VPN Statuses](https://developer.cisco.com/meraki/api/get-organization-appliance-vpn-statuses/) endpoint to show VPN statuses across the networks in the organization. + + NRQL to find VPN status telemetry: + + ```sql + FROM Metric SELECT + latest(status) AS 'VPN Status' // Latest status of this VPN + FACET + org_id AS 'Organization ID', + org_name AS 'Organization Name', + network_id AS 'Network ID', + network AS 'Network Name', + device_name AS 'Device Name', + serial AS 'Device Serial', + vpn_mode AS 'VPN Mode', + wan1 OR wan2 AS 'WAN Interface IP' + WHERE instrumentation.name = 'meraki.vpn_status' + AND org_id IS NOT NULL + ``` + +
+ + * `meraki_config.monitor_vpn_status: true && meraki_config.preferences.show_vpn_peers: true`: Uses the [Get Organization Appliance VPN Statuses](https://developer.cisco.com/meraki/api/get-organization-appliance-vpn-statuses/) endpoint to add information about VPN peers across the networks in the organization. + + NRQL to find VPN peers telemetry: + + ```sql + FROM Metric SELECT + latest(status) AS 'Peer Status' // Current status of this VPN peer + FACET + network_id AS 'Network ID', + network AS 'Network Name', + device_name AS 'Device Name', + serial AS 'Device Serial', + vpn_mode AS 'VPN Mode', + wan1 AS 'WAN 1 IP', + wan2 AS 'WAN 2 IP', + peer_name AS 'Peer Name', // Name of this peer + peer_reachability AS 'Peer Reachability', // Latest results of reachability test for this peer + peer_network_id AS 'Peer Network ID', // Network ID for this peer + peer_type AS 'Peer Type' // Type of Peer (Meraki vs Third-party) + WHERE metricName = 'kentik.meraki.vpn_status.PeerStatus' + ``` + +
+ + + + + + + You can use the [KENTIK_MERAKI_API_KEY](/docs/network-performance-monitoring/advanced/ktranslate-container-management/#container-runtime-options) environment variable to pass your API key into the Meraki integration without storing it in plain text on your configuration file. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Key name Required Input Description
meraki_config.api_key ✓ (Not required if using the [KENTIK_MERAKI_API_KEY](/docs/network-performance-monitoring/advanced/ktranslate-container-management) environment variable) API Key (string) [Meraki Dashboard API key](https://documentation.meraki.com/General_Administration/Other_Topics/Cisco_Meraki_Dashboard_API#Enable_API_Access) for authentication.
meraki_config.max_http_retry + Integer between 1-10 (Default: 2) Optional setting that controls how often a retry is attempted on API requests that return a `HTTP 429` error. The interval between retries is 5 seconds.
meraki_config.monitor_devices + true | false (Default: false) Monitors the status of every Meraki device in the organization.
meraki_config.monitor_org_changes + true | false (Default: false) Monitors the change log for the organization.
meraki_config.monitor_uplinks + true | false (Default: true) Monitors the uplink status and performance of every Meraki MX, MG and Z series device in the organization.
meraki_config.monitor_vpn_status + true | false (Default: false) Monitors the VPN statuses across the networks in the organization.
+ +
+ + + + These options allow you to restrict monitoring to specifically targeted objects in your Meraki environment. + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Key name Required Input Description
meraki_config.organizations + Regex in [RE2 syntax](https://github.com/google/re2/wiki/Syntax) (Default: null) Filters all monitoring to a specific list of organizations.
meraki_config.networks + Regex in [RE2 syntax](https://github.com/google/re2/wiki/Syntax) (Default: null) Filters all monitoring to a specific list of networks.
meraki_config.product_types + Valid types are wireless, appliance, switch, systemsManager, camera, cellularGateway, sensor, and cloudGateway. (Default: null) Adds parameters to the [monitor_devices](https://developer.cisco.com/meraki/api/get-organization-devices-statuses/) API request to filter on specific types of devices.
+ +
+ + + + These options allow you to further define the data collected from the main configuration options. Various combinations are described in the examples section above. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Key name Required Input Description
meraki_config.preferences.device_status_only + true | false (Default: false) *Required* when using `monitor_devices: true` to restrict polling to only status information. **(This is used to prevent timeout issues.)**
meraki_config.preferences.hide_uplink_usage + true | false (Default: false) Used in combination with `monitor_uplinks` to remove performance metrics and only return status information for uplinks.
meraki_config.preferences.show_vpn_peers + true | false (Default: false) Used in combination with `monitor_vpn_status` to add telemetry on VPN peers.
meraki_config.preferences.show_network_attr + true | false (Default: false) Used to add telemetry on networks. Required to create `Meraki Network` entities.
+ +
+ +
+ + ### Minimum configuration example [#meraki-minimum-config] ```yaml # This represents the minimal configuration required for a container that only performs Meraki API polling. @@ -1964,78 +2034,107 @@ The network monitoring agent has built-in support for retrieving keys from [AWS timeout_ms: 30000 ``` - ### Full configuration example + ### Full configuration examples [#meraki-full-config] + + #### All options required to create the `Meraki Organization`, `Meraki Network`, and `Meraki Device` entities. ```yaml - # This represents a configuration using all available options that creates multiple entities in New Relic. - --- - devices: - # Entity 1 - monitor everything this API key has access to - meraki_all: - device_name: meraki_all - device_ip: snmp.meraki.com - provider: meraki-cloud-controller - ext: - ext_only: true - meraki_config: - api_key: "$YOUR_API_KEY_1" - max_http_retry: 8 - monitor_devices: true - monitor_org_changes: true - monitor_uplinks: true - monitor_vpn_status: true - preferences: - device_status_only: true - show_vpn_peers: true - hide_uplink_usage: false - # Entity 2 - Monitor these specific organizations under this API key - meraki_single_org: - device_name: meraki_single_org - device_ip: snmp.meraki.com - provider: meraki-cloud-controller - ext: - ext_only: true - meraki_config: - api_key: "$YOUR_API_KEY_2" - monitor_devices: true - monitor_org_changes: true - monitor_uplinks: true - monitor_vpn_status: true - preferences: - device_status_only: true - show_vpn_peers: true - hide_uplink_usage: false - organizations: - - "Org 1 - Prod.*" - - "Org 2 - Staging" - # Entity 3 - Monitor specific devices filtered by organization, network, and product types; using the same API key from Entity 2 - meraki_filtered: - device_name: meraki_filtered - device_ip: snmp.meraki.com - provider: meraki-cloud-controller - ext: - ext_only: true - meraki_config: - api_key: "$YOUR_API_KEY_2" - monitor_devices: true - monitor_uplinks: false - preferences: - 
device_status_only: true - organizations: - - "Org 3 - Remote Sites" - networks: - - "Corp.*99" - - "Retail.*" - product_types: - - wireless - - appliance - trap: {} - discovery: {} - global: - poll_time_sec: 300 - timeout_ms: 30000 + devices: + meraki_dashboard_api: + device_name: meraki_controller + device_ip: snmp.meraki.com + provider: meraki-cloud-controller + ext: + ext_only: true + meraki_config: + api_key: $YOUR_MERAKI_API_KEY + monitor_devices: true + monitor_org_changes: true + monitor_uplinks: true + monitor_vpn_status: true + preferences: + device_status_only: true + hide_uplink_usage: false + show_vpn_peers: true + show_network_attr: true + trap: {} + discovery: {} + global: + poll_time_sec: 300 + timeout_ms: 30000 + ``` + + #### Targeting multiple Meraki Dashboard API keys + + ```yaml + devices: + # Entity 1 - monitor everything this API key has access to + meraki_all: + device_name: meraki_all + device_ip: snmp.meraki.com + provider: meraki-cloud-controller + ext: + ext_only: true + meraki_config: + api_key: "$YOUR_API_KEY_1" + max_http_retry: 8 + monitor_devices: true + monitor_org_changes: true + monitor_uplinks: true + monitor_vpn_status: true + preferences: + device_status_only: true + show_vpn_peers: true + hide_uplink_usage: false + # Entity 2 - Monitor these specific organizations under this API key + meraki_single_org: + device_name: meraki_single_org + device_ip: snmp.meraki.com + provider: meraki-cloud-controller + ext: + ext_only: true + meraki_config: + api_key: "$YOUR_API_KEY_2" + monitor_devices: true + monitor_org_changes: true + monitor_uplinks: true + monitor_vpn_status: true + preferences: + device_status_only: true + show_vpn_peers: true + hide_uplink_usage: false + organizations: + - "Org 1 - Prod.*" + - "Org 2 - Staging" + # Entity 3 - Monitor specific devices filtered by organization, network, and product types; using the same API key from Entity 2 + meraki_filtered: + device_name: meraki_filtered + device_ip: snmp.meraki.com + 
provider: meraki-cloud-controller + ext: + ext_only: true + meraki_config: + api_key: "$YOUR_API_KEY_2" + monitor_devices: true + monitor_uplinks: false + preferences: + device_status_only: true + organizations: + - "Org 3 - Remote Sites" + networks: + - "Corp.*99" + - "Retail.*" + product_types: + - wireless + - appliance + trap: {} + discovery: {} + global: + poll_time_sec: 300 + timeout_ms: 30000 ``` +
@@ -2326,3 +2425,40 @@ By default, flow data containers will collect and process every flow packet they ``` + +## Automatically reloading custom SNMP profiles + +By default, the `ktranslate` Docker container must be manually destroyed and rebuilt to incorporate changes to the SNMP profiles in the `mib_profile_dir` path. This is normal behavior in most deployments as the Docker image pulls in the latest profiles available from the public [snmp-profiles repository](https://github.com/kentik/snmp-profiles). In situations where you provide [your custom profiles](/docs/network-performance-monitoring/advanced/snmp-profiles/#private), you can use this setting to enable the container to automatically refresh the underlying configurations and SNMP profiles for the container. + + + + This is not recursive because of a limitation in the [watcher library](https://github.com/fsnotify/fsnotify?tab=readme-ov-file#are-subdirectories-watched). So, if a profile changes in a subdirectory, you must also edit a top-level file to trigger the change. + + + + Assuming this directory structure: + +``` +. +└── /snmp-profiles/ + └── profiles/ + └── kentik-snmp/ + ├── 3com + ├── _general + ├── a10networks + └── ... +``` + +You will need to place a new file at the root of the directory and manually change it to trigger this refresh cycle. An easy way to implement this is to simply write a timestamp to a file such as `last_updated.txt` when your change is submitted. + +``` +. +└── /snmp-profiles/ + ├── last_updated.txt + └── profiles/ + └── kentik-snmp/ + ├── 3com + ├── _general + ├── a10networks + └── ... 
+``` diff --git a/src/content/docs/network-performance-monitoring/advanced/ktranslate-container-management.mdx b/src/content/docs/network-performance-monitoring/advanced/ktranslate-container-management.mdx index 6bfae1a364c..8f939a19cc0 100644 --- a/src/content/docs/network-performance-monitoring/advanced/ktranslate-container-management.mdx +++ b/src/content/docs/network-performance-monitoring/advanced/ktranslate-container-management.mdx @@ -445,6 +445,40 @@ Below are the various options available during Docker runtime for the + + + `-syslog.format` + + + + Flag + + + + + + Format to parse syslog messages with. Options are `Automatic|RFC3164|RFC5424|RFC6587|NoFormat`. Default: `Automatic`. + + *`NoFormat` must be explicitly set to handle messages that do not conform to RFC standards.* + + + + + + `-syslog.source` + + + + Flag + + + + + + IP:Port tuple to run the Syslog server on. Default: `0.0.0.0:5143` + + + `NEW_RELIC_API_KEY` diff --git a/src/content/docs/network-performance-monitoring/setup-performance-monitoring/network-syslog-monitoring.mdx b/src/content/docs/network-performance-monitoring/setup-performance-monitoring/network-syslog-monitoring.mdx index 18c48a4ff17..d4bbd47bcf6 100644 --- a/src/content/docs/network-performance-monitoring/setup-performance-monitoring/network-syslog-monitoring.mdx +++ b/src/content/docs/network-performance-monitoring/setup-performance-monitoring/network-syslog-monitoring.mdx @@ -62,10 +62,10 @@ If you're using Linux to install the agent as a service, you need: title="Network syslog devices prerequisites" > - `ktranslate` handles syslog in the following formats: [RFC3164](https://datatracker.ietf.org/doc/html/rfc3164), [RFC5424](https://datatracker.ietf.org/doc/html/rfc5424), and [RFC6587](https://datatracker.ietf.org/doc/html/rfc6587). Any messages received outside of these formats will be discarded. (Notably, this includes syslogs from Meraki, which don't align with any RFC standards.) 
+ `ktranslate` handles syslog in the following formats automatically: [RFC3164](https://datatracker.ietf.org/doc/html/rfc3164), [RFC5424](https://datatracker.ietf.org/doc/html/rfc5424), and [RFC6587](https://datatracker.ietf.org/doc/html/rfc6587). Any messages received outside of these formats will be discarded unless you configure the [-syslog.format=NoFormat](/docs/network-performance-monitoring/advanced/ktranslate-container-management/#container-runtime-options) flag at runtime. -* Source devices must be configured to send syslog messages to the host running the network monitoring agent. Here's how to configure network syslog export in some devices (this is not an all-inclusive list): +Source devices must be configured to send syslog messages to the host running the network monitoring agent. Here's how to configure network syslog export in some devices (this is not an all-inclusive list): * [Checkpoint - Security Gateway](https://sc1.checkpoint.com/documents/R80.40/WebAdminGuides/EN/CP_R80.40_LoggingAndMonitoring_AdminGuide/Topics-LMG/Working-with-Syslog-Servers.htm). You must sign in to the User Center/PartnerMAP checkpoint. * [Cisco - ASA](https://www.cisco.com/c/en/us/support/docs/security/pix-500-series-security-appliances/63884-config-asa-00.html) * [Cisco - IOS](https://community.cisco.com/t5/networking-documents/how-to-configure-logging-in-cisco-ios/ta-p/3132434) @@ -177,26 +177,16 @@ For most use cases, we recommended our guided install to set up network flow dat 4. 
Run `ktranslate` to listen for network syslog messages by running: ```shell - docker run -d --name ktranslate-syslog --restart unless-stopped --pull=always -p 162:1620/udp \ + docker run -d --name ktranslate-$CONTAINER_SERVICE --restart unless-stopped --pull=always -p 514:5143/udp \ -v `pwd`/snmp-base.yaml:/snmp-base.yaml \ - # Replace with your license key -e NEW_RELIC_API_KEY=$YOUR_NR_LICENSE_KEY \ kentik/ktranslate:v2 \ -snmp /snmp-base.yaml \ - # Replace with your account ID -nr_account_id=$YOUR_NR_ACCOUNT_ID \ - # If your organization is located in Europe, add the following flag: - # -nr_region=EU \ - # If you want to use FedRAMP, add the following flag to use the FedRAMP authorized endpoints: - # -nr_region=GOV \ -metrics=jchf \ -tee_logs=true \ - # Use this field to create a unique value for `tags.container_service` inside of New Relic - -service_name=$UNIQUE_NAME \ - # Optional: To override the default listening port of "0.0.0.0:5143": - # -syslog.source=":" \ - # Optional: To set a custom DNS server for IP resolution - # -dns="$DNS_SERVER" \ + -dns=local \ + -service_name=$CONTAINER_SERVICE \ nr1.syslog ``` diff --git a/src/content/docs/network-performance-monitoring/troubleshooting/change-ktranslate-versions.mdx b/src/content/docs/network-performance-monitoring/troubleshooting/change-ktranslate-versions.mdx index 60293fb5f9d..894776863a8 100644 --- a/src/content/docs/network-performance-monitoring/troubleshooting/change-ktranslate-versions.mdx +++ b/src/content/docs/network-performance-monitoring/troubleshooting/change-ktranslate-versions.mdx @@ -21,12 +21,12 @@ The same container image may be available under several tags. 
For example, the n To control which release you run, take the following `docker run` example: ```shell -docker run -d --name ktranslate-snmp --restart unless-stopped --pull=always -p 162:1620/udp \ +docker run -d --name ktranslate-SNMP --restart unless-stopped --pull=always -p 162:1620/udp \ -v `pwd`/snmp-base.yaml:/snmp-base.yaml \ --e NEW_RELIC_API_KEY=********************************FFFFNRAL \ +-e NEW_RELIC_API_KEY=$YOUR_LICENSE_KEY \ kentik/ktranslate:v2 \ -snmp /snmp-base.yaml \ - -nr_account_id=0000000 \ + -nr_account_id=$YOUR_ACCOUNT_ID \ -metrics=jchf \ -tee_logs=true \ -service_name=SNMP \ @@ -38,12 +38,12 @@ kentik/ktranslate:v2 \ Remove `--pull=always` and change the container tag from `v2` to the tag of release you want to use. That'll result in: ```shell -docker run -d --name ktranslate-snmp --restart unless-stopped -p 162:1620/udp \ +docker run -d --name ktranslate-SNMP --restart unless-stopped -p 162:1620/udp \ -v `pwd`/snmp-base.yaml:/snmp-base.yaml \ --e NEW_RELIC_API_KEY=********************************FFFFNRAL \ +-e NEW_RELIC_API_KEY=$YOUR_LICENSE_KEY \ kentik/ktranslate:kt-2023-02-28-4294815650 \ -snmp /snmp-base.yaml \ - -nr_account_id=0000000 \ + -nr_account_id=$YOUR_ACCOUNT_ID \ -metrics=jchf \ -tee_logs=true \ -service_name=SNMP \ diff --git a/src/content/docs/network-performance-monitoring/troubleshooting/meraki-controller-no-data.mdx b/src/content/docs/network-performance-monitoring/troubleshooting/meraki-entity-no-data.mdx similarity index 85% rename from src/content/docs/network-performance-monitoring/troubleshooting/meraki-controller-no-data.mdx rename to src/content/docs/network-performance-monitoring/troubleshooting/meraki-entity-no-data.mdx index 1e59e3aacdd..1072a4f31e4 100644 --- a/src/content/docs/network-performance-monitoring/troubleshooting/meraki-controller-no-data.mdx +++ b/src/content/docs/network-performance-monitoring/troubleshooting/meraki-entity-no-data.mdx @@ -5,12 +5,14 @@ tags: - Network monitoring - Troubleshooting 
metaDescription: Meraki API polling is working, but expected metrics are missing. +redirects: + - /docs/network-performance-monitoring/troubleshooting/meraki-controller-no-data freshnessValidatedDate: never --- ## Problem [#problem] -During Meraki API monitoring, you don't see all of the expected metrics for your controller. +During Meraki API monitoring, you don't see all of the expected metrics for your entity. ## Solution [#solution] @@ -20,7 +22,8 @@ Identify what metrics exist in New Relic by running the following NRQL query: FROM Metric, KExtEvent SELECT count(*) FACET - metricName OR eventType() + metricName OR eventType(), + instrumentation.name WHERE instrumentation.name LIKE 'meraki%' OR eventType() = 'KExtEvent' SINCE 1 HOUR AGO diff --git a/src/content/docs/new-relic-solutions/best-practices-guides/cx-improve-page-load.mdx b/src/content/docs/new-relic-solutions/best-practices-guides/cx-improve-page-load.mdx index 43dbb23524f..a3b51855c17 100644 --- a/src/content/docs/new-relic-solutions/best-practices-guides/cx-improve-page-load.mdx +++ b/src/content/docs/new-relic-solutions/best-practices-guides/cx-improve-page-load.mdx @@ -75,17 +75,18 @@ Additional resources: * [Google's approach to LCP optimization](https://web.dev/optimize-lcp/). * [Lighthouse](https://developers.google.com/web/tools/lighthouse) is a tool by Google that runs a synthetic test against a specific page and provides a list of recommendations that include how to optimize CLS. -## First input delay (FID) +## Interaction to next paint (INP) [#INP] -First input delay (FID) is the time between when a user first interacts with a page to the time when the browser is able to respond. It's a field metric that varies based on real user behavior (results vary based on user impatience and action timing) but can be optimized by reducing total blocking time (TBT). 
+Interaction to next paint (INP) measures how quickly a page responds to user interactions such as clicks, taps, and keyboard input throughout its lifespan. It's a field metric that varies based on real user behavior (results vary based on user impatience and action timing) but can be optimized by reducing total blocking time (TBT). To do this, you need to: * Break up long blocking tasks. * Optimize bloated JavaScript. * Look at moving logic server side and/or use web workers to run threads in the background. + Use [Browser session trace information](/docs/browser/browser-monitoring/browser-pro-features/session-traces-explore-webpages-life-cycle/) to understand where your blocking intervals are occurring and for how long they last. Additional resources: -* [Google's approach to FID optimization](https://web.dev/optimize-fid/). -* [Lighthouse](https://developers.google.com/web/tools/lighthouse) is a tool by Google that runs a synthetic test against a specific page and provides a list of recommendations that include how to optimize CLS. +* [Google's approach to INP optimization](https://web.dev/articles/optimize-inp). +* [Lighthouse](https://developers.google.com/web/tools/lighthouse) is a tool by Google that runs a synthetic test against a specific page and provides a list of recommendations that include how to optimize INP.
diff --git a/src/content/docs/new-relic-solutions/best-practices-guides/full-stack-observability/browser-monitoring-best-practices-guide.mdx b/src/content/docs/new-relic-solutions/best-practices-guides/full-stack-observability/browser-monitoring-best-practices-guide.mdx index 6d88c028546..8eca57addd6 100644 --- a/src/content/docs/new-relic-solutions/best-practices-guides/full-stack-observability/browser-monitoring-best-practices-guide.mdx +++ b/src/content/docs/new-relic-solutions/best-practices-guides/full-stack-observability/browser-monitoring-best-practices-guide.mdx @@ -189,7 +189,7 @@ What happens if your core web vitals exceed your thresholds? You want to be aler New Relic provides [unified alerting](/docs/alerts-applied-intelligence/overview/#concepts-terms) across all our products, including browser monitoring, so that you'll always be in the know. We recommend setting up to monitor your core web vitals, along with these sample alerts to get you started: -* **Core web vitals:** Alert if first input delay or largest contentful paint are above the `Needs improvement` threshold. +* **Core web vitals:** Alert if interaction to next paint or largest contentful paint are above the `Needs improvement` threshold. * **Page load time:** Alert if median page load time is above 10 seconds for 5 minutes. (If the median page load time begins to spike, this suggests that something may be wrong with your webpage, causing it to significantly slow down. This complements alerting on your Apdex score.) * **JS errors:** Alert if error rate is above 5% for 5 minutes. (If your frontend error rate starts spiking, particularly after a deployment, you may have introduced bad JavaScript into your frontend that should be fixed.) 
diff --git a/src/content/docs/new-relic-solutions/get-started/intro-new-relic.mdx b/src/content/docs/new-relic-solutions/get-started/intro-new-relic.mdx index 5f23d5f649f..af43528e83e 100644 --- a/src/content/docs/new-relic-solutions/get-started/intro-new-relic.mdx +++ b/src/content/docs/new-relic-solutions/get-started/intro-new-relic.mdx @@ -28,8 +28,6 @@ redirects: - /docs/new-relic-one/use-new-relic-one/cross-product-functions/install-configure/install-new-relic - /docs - /docs/using-new-relic/cross-product-functions/install-configure/install-new-relic -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. freshnessValidatedDate: never --- diff --git a/src/content/docs/new-relic-solutions/new-relic-one/introduction-new-relic-platform.mdx b/src/content/docs/new-relic-solutions/new-relic-one/introduction-new-relic-platform.mdx index 4913faad10c..5a76750b173 100644 --- a/src/content/docs/new-relic-solutions/new-relic-one/introduction-new-relic-platform.mdx +++ b/src/content/docs/new-relic-solutions/new-relic-one/introduction-new-relic-platform.mdx @@ -18,8 +18,6 @@ redirects: - /docs/new-relic-one/introduction-new-relic-one - /docs/new-relic-solutions/new-relic-one/introduction-new-relic-one - /docs/new-relic-solutions/new-relic-one/ui-data/basic-ui-features -signupBanner: - text: Monitor and improve your entire stack. 100GB free. Forever. 
freshnessValidatedDate: never --- diff --git a/src/content/docs/new-relic-solutions/observability-maturity/customer-experience/customer-experience-quality-foundation-guide.mdx b/src/content/docs/new-relic-solutions/observability-maturity/customer-experience/customer-experience-quality-foundation-guide.mdx index 0b724ec2ce3..fba34fb65e4 100644 --- a/src/content/docs/new-relic-solutions/observability-maturity/customer-experience/customer-experience-quality-foundation-guide.mdx +++ b/src/content/docs/new-relic-solutions/observability-maturity/customer-experience/customer-experience-quality-foundation-guide.mdx @@ -77,7 +77,7 @@ This KPI measures whether or not your application or its pages can be accessed b id="core-web-lcp-kpi" title="Largest contentful paint (LCP)" > -This KPI is part of [Core Web Vitals](https://web.dev/vitals/). Largest Contentful Paint (LCP) measures the time it takes to load the largest image after a user has navigated to a new page. +This KPI is part of [Core Web Vitals](https://web.dev/vitals/). Largest contentful paint (LCP) measures the time it takes to load the largest image after a user has navigated to a new page. **Goal:** * Reduce LCP to 2.5 seconds or better for the 75% percentile for all pages or at least the most critical pages. @@ -91,17 +91,17 @@ LCP thresholds are defined by the team at Google. The thresholds and the suppor -This KPI is part of [Core Web Vitals](https://web.dev/vitals/). It measures the interactivity of a page by tracking the time between user interaction (such as clicking a link or entering text) when the browser begins processing the event. +This KPI is part of [core web vitals](https://web.dev/vitals/). It measures how quickly a web page visually responds to user actions like clicks or taps. -**Goal:** Reduce FID to 100 milliseconds or better for the 75% percentile for all pages or at least the most critical pages. 
+**Goal:** Reduce INP to 200 milliseconds or better for the 75% percentile for all pages or at least the most critical pages. **Thresholds:** -* Warning: > 100 milliseconds -* Critical: > 300 milliseconds +* Warning: 200 to 500 milliseconds +* Critical: > 500 milliseconds -FID thresholds are defined by the team at Google. The thresholds and the supporting logic behind them can be found on [Google's web.dev site](https://web.dev/defining-core-web-vitals-thresholds/). +INP thresholds are defined by the team at Google. The thresholds and the supporting logic behind them can be found on [Google's web.dev site](https://web.dev/articles/inp). - Logging is optional with the Go agent. If you are using `newrelic.NewLogger(w)` and want more detailed output, change `newrelic.NewLogger(w)` to `newrelic.NewDebugLogger(w)`. For more information, see the [New Relic Go logging documentation on GitHub](https://github.com/newrelic/go-agent/blob/master/log.go). + Logging is optional with the Go agent. If you are using `newrelic.NewLogger(w)` and want more detailed output, change `newrelic.NewLogger(w)` to `newrelic.NewDebugLogger(w)`. For more information, see the [New Relic Go logging documentation on GitHub](https://github.com/newrelic/go-agent/blob/master/v3/newrelic/log.go). @@ -105,7 +105,7 @@ For details about the audit logging options for your APM agent's configuration f - Use [`audit_log` values](/docs/agents/ruby-agent/installation-configuration/ruby-agent-configuration#audit_log). For more information, see [Ruby agent audit log](/docs/agents/ruby-agent/troubleshooting/ruby-agent-audit-log). + Use [`audit_log` values](/docs/apm/agents/ruby-agent/configuration/ruby-agent-configuration/#audit-log). For more information, see [Ruby agent audit log](/docs/agents/ruby-agent/troubleshooting/ruby-agent-audit-log). 
diff --git a/src/content/docs/nrql/get-started/charts-and-dashboards-with-nrql.mdx b/src/content/docs/nrql/get-started/charts-and-dashboards-with-nrql.mdx index 085cfbcd6c3..7a2524aa02a 100644 --- a/src/content/docs/nrql/get-started/charts-and-dashboards-with-nrql.mdx +++ b/src/content/docs/nrql/get-started/charts-and-dashboards-with-nrql.mdx @@ -9,7 +9,7 @@ translate: metaDescription: "Learn the basics of charts and dashboards, and how they relate to NRQL." redirects: - /docs/query-your-data/nrql-new-relic-query-language/get-started/charts-and-dashboards-with-nrql -freshnessValidatedDate: never +freshnessValidatedDate: 2024-03-19 --- import dashboardsDashboardUi from 'images/dashboards_screenshot-crop_dashboard-ui.webp' diff --git a/src/content/docs/nrql/get-started/introduction-nrql-how-nrql-works.mdx b/src/content/docs/nrql/get-started/introduction-nrql-how-nrql-works.mdx index 5ea98f84879..7288046008c 100644 --- a/src/content/docs/nrql/get-started/introduction-nrql-how-nrql-works.mdx +++ b/src/content/docs/nrql/get-started/introduction-nrql-how-nrql-works.mdx @@ -10,7 +10,7 @@ translate: metaDescription: "Learn how to query with NRQL, NRQL syntax, and how you can explore your data." redirects: - /docs/query-your-data/nrql-new-relic-query-language/get-started/introduction-nrql-how-nrql-works -freshnessValidatedDate: never +freshnessValidatedDate: 2024-03-19 --- import queriesnrqlEventDefinitionsQueryBuilder from 'images/queries-nrql_screenshot-crop_event-definitions-query-builder.webp' @@ -77,7 +77,7 @@ Another great way to explore your data is to go into any existing dashboards you ## NRQL query examples [#examples] Here's an example of a slightly more in-depth NRQL query of `Transaction` data reported by [APM](/docs/apm). For this query: -* You choose `Transaction` as the data typ. +* You choose `Transaction` as the data type. * You use `Select` to determine the average duration. * You group results by appName using `Facet`. 
* You use `Timeseries` to display the data over an automated timespan. diff --git a/src/content/docs/nrql/get-started/introduction-nrql-new-relics-query-language.mdx b/src/content/docs/nrql/get-started/introduction-nrql-new-relics-query-language.mdx index 27912a72e96..d37f289ab23 100644 --- a/src/content/docs/nrql/get-started/introduction-nrql-new-relics-query-language.mdx +++ b/src/content/docs/nrql/get-started/introduction-nrql-new-relics-query-language.mdx @@ -91,7 +91,7 @@ redirects: - /docs/query-your-data/nrql-new-relic-query-language - /docs/query-your-data/nrql - /docs/query-your-data/nrql-new-relic-query-language/get-started/introduction-nrql-new-relics-query-language -freshnessValidatedDate: never +freshnessValidatedDate: 2024-03-19 --- import queriesnrqlViewQueryforChart from 'images/queries-nrql_screenshot-crop_view-query-for-chart.webp' diff --git a/src/content/docs/nrql/nrql-references/nrql-group-results-across-time.mdx b/src/content/docs/nrql/nrql-references/nrql-group-results-across-time.mdx index 582ff3a30e8..ca1a331d1bc 100644 --- a/src/content/docs/nrql/nrql-references/nrql-group-results-across-time.mdx +++ b/src/content/docs/nrql/nrql-references/nrql-group-results-across-time.mdx @@ -24,45 +24,8 @@ import queriesFacetByTimeTwoFunctions from 'images/queries_screenshot-crop_facet With [NRQL](/docs/query-data/nrql-new-relic-query-language/getting-started/introduction-nrql), you can create queries that group results across time. For example, you can group results together based on timestamps by separating them into buckets that cover a specified range of dates and times. -A screenshot displaying a NRQL query faceted by time - When using the time functions from the table below in NRQL queries, the results return in UTC. To adjust the results to your time zone, include the [`WITH TIMEZONE` clause](/docs/insights/nrql-new-relic-query-language/nrql-resources/nrql-syntax-components-functions#sel-timezone) in your query. 
-## Facet your NRQL query time range [#cohorts] - - -In these examples, we use a custom timestamp attribute submitted with PageView events called `createdAt`. To facet by the time of PageView event ingestion, you could use the `timestamp` attribute instead. - - -To create your NRQL query, use a [`FACET` clause](/docs/insights/nrql-new-relic-query-language/nrql-resources/nrql-syntax-components-functions#sel-facett) with a bucket function that works with a timestamp attribute. Run a standard `FACET` query, but instead of faceting by an attribute, facet by time. For example: - -```sql -SELECT count(*) FROM K8sDaemonsetSample FACET monthOf(createdAt) -``` - -The screenshot at the top of this doc shows the results of this query. - -To perform multiple functions within the same query, use NRQL's multi-facet capability: - -```sql -SELECT count(*) FROM K8sDaemonsetSample FACET dateOf(createdAt), monthOf(createdAt) -``` -NRQL facet by time with two functions - -Many time-based functions accept an optional second argument of either `string` (the default) or `numeric`, which controls the format of the result value. - -```sql -SELECT count(*) FROM K8sDaemonsetSample FACET monthOf(createdAt, numeric) -``` - @@ -271,6 +234,43 @@ SELECT count(*) FROM K8sDaemonsetSample FACET monthOf(createdAt, numeric)
+## Facet your NRQL query time range [#cohorts] + + +In these examples, we use a custom timestamp attribute submitted with PageView events called `createdAt`. To facet by the time of PageView event ingestion, you could use the `timestamp` attribute instead. + + +To create your NRQL query, use a [`FACET` clause](/docs/insights/nrql-new-relic-query-language/nrql-resources/nrql-syntax-components-functions#sel-facett) with a bucket function that works with a timestamp attribute. Run a standard `FACET` query, but instead of faceting by an attribute, facet by time. For example: + +```sql +SELECT count(*) FROM K8sDaemonsetSample FACET monthOf(createdAt) +``` + +A screenshot displaying a NRQL query faceted by time + +To perform multiple functions within the same query, use NRQL's multi-facet capability: + +```sql +SELECT count(*) FROM K8sDaemonsetSample FACET dateOf(createdAt), monthOf(createdAt) +``` +NRQL facet by time with two functions + +Many time-based functions accept an optional second argument of either `string` (the default) or `numeric`, which controls the format of the result value. + +```sql +SELECT count(*) FROM K8sDaemonsetSample FACET monthOf(createdAt, numeric) +``` + +## Facet examples [#facet-examples] + - This can also be used to test whether something returns NULL or zero. `(zero) OR 1` returns 0. `(NULL) OR 1` returns 1. + You can also use this whether something returns NULL or zero. `(zero) OR 1` returns 0, and `(NULL) OR 1` returns 1. 
diff --git a/src/content/docs/nrql/using-nrql/arrays-in-nrql.mdx b/src/content/docs/nrql/using-nrql/arrays-in-nrql.mdx index 4d7741564f6..f7d33c7dc4d 100644 --- a/src/content/docs/nrql/using-nrql/arrays-in-nrql.mdx +++ b/src/content/docs/nrql/using-nrql/arrays-in-nrql.mdx @@ -3,32 +3,28 @@ title: Use arrays for organization metaDescription: "How to use arrays in NRQL, the New Relic query language" redirects: - /docs/query-your-data/nrql-new-relic-query-language/get-started/arrays-in-nrql -freshnessValidatedDate: never +freshnessValidatedDate: 2024-03-19 --- -As part of New Relic's support for OpenTelemetry Protocol (OTLP) attributes can contain arrays. Arrays are a data structure that contains an ordered collection of values. +As part of our support for the OpenTelemetry Protocol (OTLP), New Relic can process attributes containing arrays. But what are arrays? Essentially, arrays are data structures containing an ordered collection of values that you can query with NRQL. Typically, arrays display in a format like `[1, 2, 3, 4, 5]` or `["US", "CA", "UK"]`. -Currently only simple arrays are supported. You can query them with NRQL. To quickly verify that any data is in an array, you can use the JSON chart type to see if the data displays as an array. - -## Simple arrays [#simple-arrays] - -Simple arrays are used to store a list of values of the same primitive type; for example, numeric, boolean, or string. An array looks like `[1, 2, 3, 4, 5]` or `["US", "CA", "UK"]`. +Currently, we only support simple, single-dimension arrays. You can use simple arrays to store a list of values of the same primitive type; for example, numeric, boolean, or string. To quickly verify that an array contains any data, you can use the JSON chart type to see if the data displays in an array structure. ## Supported features [#supported-features] -* Array attributes are supported in the `FACET` clause. -* The functions `getfield()`, `length()`, and `contains()` can be run on arrays. 
-* The `uniques()`, `uniqueCount()`, and `latest()` aggregator functions are also supported. +* The `FACET` clause supports array attributes. +* You can run the functions `getfield()`, `length()`, and `contains()` on arrays. +* You can also use the `uniques()`, `uniqueCount()`, and `latest()` aggregator functions. * Arrays can contain up to 64 elements. ## Unsupported features [#unsupported-features] -* Comparison operators such as `=`, `!=`, and `IN` are not supported with arrays. -* Arrays cannot be aggregated with other aggregator functions such as `sum()`, `min()`, `max()`, etc. -* Arrays cannot contain other arrays. For example, `["UK", "US", ["CA", "OR", "ATL"]]` are not allowed. -* Arrays cannot contain `NULL`. +* Comparison operators such as `=`, `!=`, and `IN` aren't supported with arrays. +* You can't aggregate arrays with other aggregator functions, such as `sum()`, `min()`, `max()`, etc. +* Arrays can't contain other arrays, such as `["UK", "US", ["CA", "OR", "ATL"]]`. +* Arrays can't contain `NULL`. -## Array functions [#array-functions] +## Array functions and examples [#array-functions] **Example:**
`durations[4]` will return `105`. @@ -65,7 +61,7 @@ Simple arrays are used to store a list of values of the same primitive type; for className="freq-link" title={contains(attribute, element)} > - Use the `contains()` function to determine if an element is present in an array. + Use the `contains()` function to see if there's an element in an array. diff --git a/src/content/docs/nrql/using-nrql/create-smoother-charts-sliding-windows.mdx b/src/content/docs/nrql/using-nrql/create-smoother-charts-sliding-windows.mdx index 5e63f6f7d41..cf1e622acd2 100644 --- a/src/content/docs/nrql/using-nrql/create-smoother-charts-sliding-windows.mdx +++ b/src/content/docs/nrql/using-nrql/create-smoother-charts-sliding-windows.mdx @@ -10,7 +10,7 @@ redirects: - /docs/query-your-data/nrql-new-relic-query-language/nrql-query-tutorials - /docs/new-relic-query-language/nrql-query-tutorials - /docs/query-your-data/nrql-new-relic-query-language/nrql-query-tutorials/create-smoother-charts-sliding-windows -freshnessValidatedDate: never +freshnessValidatedDate: 2024-03-19 --- import queriesnrqlSlidebyWindowTimeSeries from 'images/queries-nrql_diagram_slide-by-window-time-series.webp' @@ -21,10 +21,10 @@ import queriesnrqlTimeseriesSpikyNRQLQueryBuilder from 'images/queries-nrql_scre import queriesnrqlTimeseriesSmoothedWithSlidebyNRQLQueryBuilder from 'images/queries-nrql_screenshot-full_timeseries-smoothed-with-slide-by-NRQL-query-builder.webp' -Sliding windows are a technique for generating charts using the `SLIDE BY` clause in conjunction with the `TIMESERIES` clause. With sliding windows, data is gathered in time "windows" that overlap with each other. +You can use the sliding windows function to create charts using the `SLIDE BY` clause in conjunction with the `TIMESERIES` clause, which lets you gather data in time "windows" that overlap with each other. 
- Sliding windows are also available via the UI in the condition [advanced signal settings](/docs/alerts-applied-intelligence/new-relic-alerts/alert-conditions/create-nrql-alert-conditions/#advanced-signal). + You can also use sliding windows via the UI in the condition [advanced signal settings](/docs/alerts-applied-intelligence/new-relic-alerts/alert-conditions/create-nrql-alert-conditions/#advanced-signal). For example, in the image below, a query gathers data with 5 minute windows. The windows "slide" by 1 minute. Each window overlaps with the previous window by 4 minutes. @@ -39,7 +39,7 @@ For example, in the image below, a query gathers data with 5 minute windows. The 5-minute windows with 1-minute "slide" -In contrast, with "tumbling" or "cascading" windows, the windows do not overlap. For example, in this `TIMESERIES 3 minutes` NRQL query, the windows are 3 minutes in length, with each beginning when the other ends. There is no overlap in the measurement interval. +In contrast to sliding windows, "tumbling" or "cascading" windows don't overlap. For example, in this `TIMESERIES 3 minutes` NRQL query, the windows are 3 minutes in length, with each beginning when the other ends without an overlap in the measurement interval. -## Valid NRQL syntax for `SLIDE BY` [#valid-syntax] +## `SLIDE BY` syntax [#valid-syntax] -Valid NRQL syntax for the `SLIDE BY` clause will follow the format below. +Valid NRQL syntax for the `SLIDE BY` clause follows the format below. ```sql SELECT ... TIMESERIES integer1 units SLIDE BY integer2 units @@ -89,17 +89,19 @@ SELECT ... TIMESERIES integer1 units SLIDE BY integer2 units `integer1` specifies the sliding window width and `integer2` specifies the `SLIDE BY` interval. `units` is a time unit, such as `second`, `minute`, `hour`, or `day`. All standard NRQL time units are accepted. -Here’s a real-life example. It shows 5-minute `TIMESERIES` windows with a 1-minute `SLIDE BY` interval. 
+Here’s a real-life example showing 5-minute `TIMESERIES` windows with a 1-minute `SLIDE BY` interval. ```sql SELECT average(duration) from Transaction TIMESERIES 5 minutes SLIDE BY 1 minute ``` -## Translation from PromQL-style queries [#translate-promql] + + When paired with `SLIDE BY`, `TIMESERIES` does not support `AUTO` or `MAX`. The `TIMESERIES` value must be an integer time unit value. In other words, `SLIDE BY AUTO` or `SLIDE BY MAX` will work, but `TIMESERIES AUTO` or `TIMESERIES MAX` followed by `SLIDE BY` and `MAX`, `AUTO`, or a specific integer time unit is not supported. + -When applicable, a PromQL-style query is translated into a NRQL sliding window query. +## Translation from PromQL-style queries [#translate-promql] -For example, if your PromQL style query uses `rate(request_count[5m])` for the past 60 minutes with a 1-minute window overlap, the NRQL translation would be the query below. +When applicable, a PromQL-style query is translated into a NRQL sliding window query. For example, if your PromQL style query uses `rate(request_count[5m])` for the past 60 minutes with a 1-minute window overlap, here's how that query would translate into NRQL. ```sql SELECT rate(sum(request_count), 1 SECONDS) FROM Metric SINCE 3600 SECONDS AGO UNTIL NOW @@ -120,10 +122,6 @@ SELECT average(duration) FROM Transaction TIMESERIES 5 minutes SLIDE BY MAX SELECT average(duration) FROM Transaction TIMESERIES 5 minutes SLIDE BY AUTO ``` - - When paired with `SLIDE BY`, `TIMESERIES` does not support `AUTO` or `MAX`. The `TIMESERIES` value must be an integer time unit value. In other words, `SLIDE BY AUTO` or `SLIDE BY MAX` will work, but `TIMESERIES AUTO` or `TIMESERIES MAX` followed by `SLIDE BY` and `MAX`, `AUTO`, or a specific integer time unit is not supported. - - The `SLIDE BY` value as determined by `AUTO` or `MAX` can produce a step interval greater than the window size, which will show up as gaps and unexpected results. 
If you experience these issues with query results, consider checking for instances of `SLIDE BY` where the step interval exceeds the window size. diff --git a/src/content/docs/nrql/using-nrql/funnels-evaluate-data-series-related-events.mdx b/src/content/docs/nrql/using-nrql/funnels-evaluate-data-series-related-events.mdx index c1b8df1a60e..3c2c2327b1c 100644 --- a/src/content/docs/nrql/using-nrql/funnels-evaluate-data-series-related-events.mdx +++ b/src/content/docs/nrql/using-nrql/funnels-evaluate-data-series-related-events.mdx @@ -13,16 +13,14 @@ redirects: - /docs/insights/nrql-new-relic-query-language/nrql-query-examples/funnels-evaluate-data-series-events - /docs/query-data/nrql-new-relic-query-language/nrql-query-examples/funnels-evaluate-data-series-events - /docs/query-your-data/nrql-new-relic-query-language/nrql-query-tutorials/funnels-evaluate-data-series-related-events -freshnessValidatedDate: never +freshnessValidatedDate: 2024-03-19 --- import queriesnrqlFunnelNRQL from 'images/queries-nrql_screenshot-crop_funnel-NRQL.webp' -With [NRQL](/docs/query-data/nrql-new-relic-query-language/getting-started/introduction-nrql), you can use funnels to evaluate sets of related actions. The actions generally share a relationship via an identifier such as a user ID or session ID. +With [NRQL](/docs/query-data/nrql-new-relic-query-language/getting-started/introduction-nrql), you can use funnels to evaluate sets of related actions. The actions generally share a relationship via an identifier, such as a user ID or session ID. -## Why it matters [#why-matters] - -NRQL `funnel` functions can answer questions like, "Of the people that completed step A, how many also completed step B, and of the people that completed steps A and B, how many also completed step C?" +You can use `funnel` functions to answer questions like, "Of the people that completed step A, how many also completed step B, and of the people that completed steps A and B, how many also completed step C?" 
For example, at New Relic, we could use `funnel` to track the number of users who completed these steps: @@ -40,23 +38,21 @@ FROM DATA_TYPE SINCE TIMEFRAME ``` -See also a more complex [funnel query example](#example). - Funnel queries require the [`funnel` function](/docs/query-data/nrql-new-relic-query-language/getting-started/nrql-syntax-components-functions#func-funnel), an [attribute](/docs/using-new-relic/welcome-new-relic/get-started/glossary#attribute) to funnel, and at least two steps: -1. The first step is the anchor step, which will always represent 100% of the results. -2. The second and later steps describe the number of users who have also completed additional actions. This number typically will be less than 100%. However, it could be 100% if every user who completes action (A) also completes the additional actions being queried. +1. Begin with the anchor step, which always represents 100% of the results. +2. Continue with an additional step or steps that describe the number of users who have also completed additional actions, typically than the 100% from the anchor step. However, it could be 100% if every user who completes action (A) also completes the additional actions you're querying. ## Funnel query technical details -One way to use funnel queries is to calculate the rate at which multi-step, ordered sequences were completed over a given timeframe, like in the New Relic signup [example](#why-matters). But you can also use funnels to calculate overlap between several distinct actions that don't have a sequential relationship to one another. In other words, the order of steps doesn't impact the calculations performed. +You can use funnel queries to calculate the completion rate for multi-step, ordered sequences over a given timeframe, like in the New Relic signup [example](#why-matters). You can also use funnels to calculate overlap between several distinct actions that don't have a sequential relationship to one another. 
In other words, the order of steps won't impact the calculations. The way funnel queries compute results varies depending on the size of the data set: -* If the funnel is dealing with fewer than 256 funnel attribute values, it will calculate the value exactly. -* If it's dealing with 256 or more funnel attribute values, it applies an algorithm called MinHash to calculate approximate results for optimized performance. +* If the funnel is processing with fewer than 256 funnel attribute values, it will calculate the value exactly. +* If the funnel deals with 256 or more funnel attribute values, it applies an algorithm called MinHash to calculate approximate results for optimized performance. -This means that for large data sets, there may be occasional "false positives" at the level of individual data points. These do not interfere greatly with the accuracy of the numerical estimates provided in query results. +This means that for large data sets, there may be occasional "false positives" at the level of individual data points. These don't interfere with the accuracy of the numerical estimates provided in query results. Here's a detailed breakdown of technical details and constraints for funnel queries. @@ -116,9 +112,9 @@ Here's a detailed breakdown of technical details and constraints for funnel quer -## Funnel query example [#example] +## Funnel example [#example] -This example queries the [`PageView`](/attribute-dictionary/?event=PageView) event and its attributes. It queries unique browser sessions that have progressed from browsing a product, to adding to cart, to checkout. Labels are included for each step, indicated by the keyword `AS`. +This example queries the [`PageView`](/attribute-dictionary/?event=PageView) event and its attributes. It queries unique browser sessions from your users that have progressed from browsing for products, to adding them to their cart, to checking out. It includes labels for each step, indicated by the keyword `AS`. 
```sql FROM PageView SELECT funnel(session, diff --git a/src/content/docs/nrql/using-nrql/lookups.mdx b/src/content/docs/nrql/using-nrql/lookups.mdx index d0bc9f1f5f9..13fa7fdda60 100644 --- a/src/content/docs/nrql/using-nrql/lookups.mdx +++ b/src/content/docs/nrql/using-nrql/lookups.mdx @@ -7,7 +7,7 @@ tags: metaDescription: In New Relic, how to query data added via CSV lookup tables. redirects: - /docs/query-your-data/nrql-new-relic-query-language/nrql-query-tutorials/lookups -freshnessValidatedDate: 2023-06-21 +freshnessValidatedDate: 2024-03-19 --- import nrqlExampleStatusCodes from 'images/nrql_screenshot-crop_example-status-codes.webp' @@ -16,31 +16,45 @@ import nrqlLookupQueryTranslateItemIds from 'images/nrql_screenshot-crop_lookup- import nrqlLookupQueryGeoip from 'images/nrql_screenshot-crop_lookup-query-geoip.webp' -When you [upload CSV-format lookup tables](/docs/logs/ui-data/lookup-tables-ui), you can use the `lookup()` function to access that data in your NRQL queries. - -## Why use lookup tables? [#why] - -For why you'd use lookup tables and how to upload them, see the [Lookup tables overview](/docs/logs/ui-data/lookup-tables-ui/#overview). +When you [upload CSV-format lookup tables](/docs/logs/ui-data/lookup-tables-ui), you can use the `lookup()` function to access that data in your NRQL queries. You can use lookup tables to help you parse telemetry data and customize your data groupings. ## Basic query syntax [#basic-syntax] -Let's say you've named your table `storeNames`. 
This query will select all data from that table: +The following lookup query shows the basic syntax for this function using a table named `storeNames` and selecting all the data from that table: ```sql FROM lookup(storeNames) SELECT * ``` -This query will select some specific attributes from that same table: +This query selects specific attributes from that same table: ```sql FROM lookup(storeNames) SELECT store_ID, store_name, description ``` -## Query examples [#query-with-data] +## Query limits [#query-limits] + +Lookup tables support a higher [`LIMIT`](/docs/query-your-data/nrql-new-relic-query-language/get-started/nrql-syntax-clauses-functions/#sel-limit) when querying than other NRQL data types do. You can set a `LIMIT` of up to 20,000 when using a lookup table within a query, [subquery](/docs/query-your-data/nrql-new-relic-query-language/get-started/subqueries-in-nrql/), and [nested aggregation](/docs/query-your-data/nrql-new-relic-query-language/nrql-query-tutorials/nested-aggregation-make-ordered-computations-single-query/). + + +When you use a lookup table within a subquery, the outer query `LIMIT` is bound by the standard maximum value unless it also uses a lookup table. + + +## Technical limitations [#limitations] + +* You can't use lookup data with NRQL alert conditions. +* You can only query lookup tables from the [account](/docs/accounts/accounts-billing/account-structure/new-relic-account-structure/#organization-accounts) you uploaded the lookup table in. +* The following NRQL clauses aren't supported with lookup queries: + * `TIMESERIES` + * `COMPARE WITH` + * `EXTRAPOLATE` + * **Note:** You can use these clauses if you contain the lookup query in an inner query. See [this query](#item-ids) for an example. + +## Lookup examples [#query-with-data] -The primary benefit of lookup tables is that you can use queries that combine that data with your New Relic-stored telemetry data. 
+Lookup tables allow you to use queries that combine data with your New Relic-stored telemetry data. Here are some query examples: @@ -64,7 +78,7 @@ WHERE hostname IN (FROM lookup(myHosts) SELECT uniques(myHost)) title="Query using JOIN" > -Using `JOIN` queries can make your data more understandable. For example, this query for a custom event type uses the `storeNames` table to show the store names along with the total sales. +Using `JOIN` queries can make your data easier to understand. For example, this query for a custom event type uses the `storeNames` table to show the store names along with the total sales. ```sql FROM StoreEvent @@ -72,7 +86,7 @@ JOIN (FROM lookup(storeNames) SELECT store_ID as storeId, storeName AS name LIMI SELECT shopId, storeName, totalSales ``` -Also notice this allows a limit of 10,000: that's because lookup tables support a higher limit than other NRQL data types. For more details, see the [query limits](#query-limits) section below. +This allows a limit of 10,000 because lookup tables support a higher limit than other NRQL data types, as is mentioned in the [query limits](#query-limits) section. @@ -168,23 +182,4 @@ Here are some example results: /> - - - -## Query Limits [#query-limits] - -Lookup tables support a higher [`LIMIT`](/docs/query-your-data/nrql-new-relic-query-language/get-started/nrql-syntax-clauses-functions/#sel-limit) when querying than other NRQL data types. You can set a `LIMIT` of up to 20,000 when using a lookup table within a query, [subquery](/docs/query-your-data/nrql-new-relic-query-language/get-started/subqueries-in-nrql/), and [nested aggregation](/docs/query-your-data/nrql-new-relic-query-language/nrql-query-tutorials/nested-aggregation-make-ordered-computations-single-query/). - - -When you use a lookup table within a subquery, the outer query `LIMIT` is bound by the standard maximum value unless it also uses a lookup table. 
- - -## Technical Limitations [#limitations] - -* You can't use lookup data for NRQL alert conditions. -* You can only query lookup tables from the [account](/docs/accounts/accounts-billing/account-structure/new-relic-account-structure/#organization-accounts) you uploaded the lookup table in. -* The following NRQL clauses aren't supported with lookup queries: - * `TIMESERIES` - * `COMPARE WITH` - * `EXTRAPOLATE` - * **Note:** You can use these clauses if you cointain the lookup query in an inner query. See [this query](#item-ids) for an example. + \ No newline at end of file diff --git a/src/content/docs/nrql/using-nrql/nested-aggregation-make-ordered-computations-single-query.mdx b/src/content/docs/nrql/using-nrql/nested-aggregation-make-ordered-computations-single-query.mdx index 8d328d44be9..b7497b2d5d4 100644 --- a/src/content/docs/nrql/using-nrql/nested-aggregation-make-ordered-computations-single-query.mdx +++ b/src/content/docs/nrql/using-nrql/nested-aggregation-make-ordered-computations-single-query.mdx @@ -13,7 +13,7 @@ redirects: - /docs/query-your-data/nrql-new-relic-query-language/nrql-query-tutorials/simulate-sql-join-functions-insights - /docs/insights/use-insights-ui/explore-data/simulate-sql-join-functions-insights/ - /docs/query-your-data/nrql-new-relic-query-language/nrql-query-tutorials/nested-aggregation-make-ordered-computations-single-query -freshnessValidatedDate: never +freshnessValidatedDate: 2024-03-19 --- With nested aggregation, you can complete a [NRQL query](/docs/query-data/nrql-new-relic-query-language/getting-started/introduction-nrql), then make additional computations using the results of that query. Nested aggregation provides NRQL capabilities similar to the class of SQL subqueries or subselects where the subquery is in the `FROM` clause of the outer query. 
@@ -22,26 +22,17 @@ With nested aggregation, you can complete a [NRQL query](/docs/query-data/nrql-n This feature is different from our [subquery feature](/docs/query-your-data/nrql-new-relic-query-language/get-started/subqueries-in-nrql), which allows for subqueries in `SELECT` and `WHERE` clauses. -## Answer complex questions with a single query [#why-matters] - Nested aggregation can help you to answer questions like these without building multiple queries: * How can I count the requests per minute for my application, then get the maximum requests per minute for the last hour? * How can I compute the average CPU usage of all my servers or hosts, and list only the ones with usage over 90%? * From all my user sessions, how can I figure out what percentage bounced immediately? -For an example of how to use nested aggregation with an app's error rate query to get percentage data and more, watch this YouTube video (approx. 3:10 minutes). - -