diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..80b14dd --- /dev/null +++ b/.dockerignore @@ -0,0 +1,9 @@ +.dockerignore +.env +.git +.gitignore +**/.vs +.vscode +**/bin +**/obj +**/.toolstarget \ No newline at end of file diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..b13ff45 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,111 @@ +# editorconfig.org +[*] +indent_style = space +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true + +[*.cs] +# Naming rules for fields +dotnet_naming_rule.private_fields_with_underscore.symbols = private_field +dotnet_naming_rule.private_fields_with_underscore.style = prefix_underscore +dotnet_naming_rule.private_fields_with_underscore.severity = warning + +dotnet_naming_symbols.private_field.applicable_kinds = field +dotnet_naming_symbols.private_field.applicable_accessibilities = private +dotnet_naming_symbols.private_field.required_modifiers = + +dotnet_naming_style.prefix_underscore.capitalization = camel_case +dotnet_naming_style.prefix_underscore.required_prefix = _ + +# Naming rules for properties +dotnet_naming_rule.private_properties_with_underscore.symbols = private_property +dotnet_naming_rule.private_properties_with_underscore.style = prefix_underscore +dotnet_naming_rule.private_properties_with_underscore.severity = warning + +dotnet_naming_symbols.private_property.applicable_kinds = property +dotnet_naming_symbols.private_property.applicable_accessibilities = private +dotnet_naming_symbols.private_property.required_modifiers = + +dotnet_naming_style.prefix_underscore.capitalization = camel_case +dotnet_naming_style.prefix_underscore.required_prefix = _ + +# Do not use 'this.' for private fields +dotnet_diagnostic.DOTNET_Naming_Style_DoNotUseThisForPrivateFields.severity = warning +dotnet_diagnostic.DOTNET_Naming_Style_DoNotUseThisForPrivateFields.symbols = field_like + +dotnet_naming_rule.style_dotnet_naming_rule_DotNetNamingStyle.DoNotUseThisForPrivateFields.symbols = field_like +dotnet_naming_rule.style_dotnet_naming_rule_DotNetNamingStyle.DoNotUseThisForPrivateFields.style = this_prefix +dotnet_naming_rule.style_dotnet_naming_rule_DotNetNamingStyle.DoNotUseThisForPrivateFields.severity = warning + +# name all constant fields using PascalCase +dotnet_naming_rule.constant_fields_should_be_pascal_case.severity = warning +dotnet_naming_rule.constant_fields_should_be_pascal_case.symbols = constant_fields +dotnet_naming_rule.constant_fields_should_be_pascal_case.style = pascal_case_style + +dotnet_naming_symbols.constant_fields.applicable_kinds = field +dotnet_naming_symbols.constant_fields.required_modifiers = const + +dotnet_naming_style.pascal_case_style.capitalization = pascal_case + +# static fields should have s_ prefix +dotnet_naming_rule.static_fields_should_have_prefix.severity = warning +dotnet_naming_rule.static_fields_should_have_prefix.symbols = static_fields +dotnet_naming_rule.static_fields_should_have_prefix.style = static_prefix_style + +dotnet_naming_symbols.static_fields.applicable_kinds = field +dotnet_naming_symbols.static_fields.required_modifiers = static + +dotnet_naming_symbols.static_fields.applicable_accessibilities = private, internal, private_protected +dotnet_naming_style.static_prefix_style.required_prefix = s_ + +dotnet_naming_style.static_prefix_style.capitalization = camel_case + +csharp_indent_labels = one_less_than_current +csharp_using_directive_placement = outside_namespace:silent +csharp_prefer_simple_using_statement = 
true:suggestion +csharp_prefer_braces = true:silent +csharp_style_namespace_declarations = block_scoped:silent +csharp_style_prefer_method_group_conversion = true:silent +csharp_style_prefer_top_level_statements = true:silent +csharp_style_prefer_primary_constructors = true:suggestion +csharp_style_expression_bodied_methods = false:silent +csharp_style_expression_bodied_constructors = false:silent +csharp_style_expression_bodied_operators = false:silent +csharp_style_expression_bodied_properties = true:silent +csharp_style_expression_bodied_indexers = true:silent +csharp_style_expression_bodied_accessors = true:silent +csharp_style_expression_bodied_lambdas = true:silent +csharp_style_expression_bodied_local_functions = false:silent + +[*.{cs,vb}] +dotnet_style_operator_placement_when_wrapping = beginning_of_line +tab_width = 4 +indent_size = 4 +end_of_line = crlf +dotnet_sort_system_directives_first = true +dotnet_style_coalesce_expression = true:suggestion +dotnet_style_null_propagation = true:suggestion +dotnet_style_prefer_is_null_check_over_reference_equality_method = true:suggestion +dotnet_style_prefer_auto_properties = true:silent +dotnet_style_object_initializer = true:suggestion +dotnet_style_prefer_collection_expression = true:suggestion +dotnet_style_collection_initializer = true:suggestion +dotnet_style_prefer_simplified_boolean_expressions = true:suggestion +dotnet_style_prefer_conditional_expression_over_assignment = true:silent +dotnet_style_prefer_conditional_expression_over_return = true:silent +dotnet_style_explicit_tuple_names = true:suggestion +dotnet_style_prefer_inferred_tuple_names = true:suggestion +dotnet_style_prefer_inferred_anonymous_type_member_names = true:suggestion +dotnet_style_prefer_compound_assignment = true:suggestion +dotnet_style_prefer_simplified_interpolation = true:suggestion + +[*.{xml,config,*proj,nuspec,props,resx,targets,yml,tasks}] +indent_size = 2 + +# Xml config files +[*.{props,targets,ruleset,config,nuspec,resx,vsixmanifest,vsct}] +indent_size = 2 + +[*.json] +indent_size = 2 \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 0000000..a79183e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,55 @@ +name: 🐞 Bug report +description: Create a report to help us improve +title: '[Bug Report]:' +labels: + - needs triage + - bug +body: +- type: checkboxes + attributes: + label: Prerequisites + options: + - label: I have searched [issues](https://github.com/Farfetch/loadshedding/issues) to ensure it has not already been reported + required: true + +- type: textarea + attributes: + label: Description + description: A clear and concise description of what the bug is. + validations: + required: true + +- type: textarea + attributes: + label: Steps to reproduce + description: Write down the steps to reproduce the bug. + placeholder: | + 1. Step 1... + 2. Step 2... + validations: + required: true + +- type: textarea + attributes: + label: Expected behavior + description: | + Describe what you would expect. Write down what you thought would happen. + placeholder: Write what you thought would happen. + validations: + required: true + +- type: textarea + attributes: + label: Actual behavior + description: | + Provide a description of the actual behavior observed. If applicable please include any error messages or exception stacktraces. + placeholder: Write what happened. 
+ validations: + required: true + +- type: input + attributes: + label: LoadShedding version + placeholder: e.g., v2.1.0 + validations: + required: true \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..7e665f2 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,5 @@ +blank_issues_enabled: false +contact_links: + - name: Documentation 📚 + url: https://farfetch.github.io/loadshedding/docs/ + about: Check out the official docs for answers to common questions \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 0000000..1ffac6a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,35 @@ +name: 💡 Feature Request +description: Suggest a new idea +title: '[Feature Request]:' +labels: + - needs triage + - enhancement +body: + - type: textarea + id: problem + attributes: + label: Is your request related to a problem you have? + description: >- + A description of the problem you are trying to address. + - type: textarea + id: solution + attributes: + label: Describe the solution you'd like + description: A clear and concise description of what you want to happen. Include any alternative solutions or features you've considered. + validations: + required: true + - type: dropdown + id: help + attributes: + label: Are you able to help bring it to life and contribute with a Pull Request? + options: + - 'No' + - 'Yes' + validations: + required: true + - type: textarea + id: context + attributes: + label: Additional context + description: Add any other context or screenshots about the feature request here. + diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..7bcea43 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,20 @@ +# Description + +Please include a summary of the change and which issue is fixed. Please also include relevant motivation and context. List any dependencies that are required for this change. + +Fixes # (issue) + +## How Has This Been Tested? + +Please describe the tests that you ran to verify your changes. + +## Checklist + +- [ ] My code follows the style guidelines of this project +- [ ] I have performed a self-review of my own code +- [ ] I have added tests to cover my changes +- [ ] I have made corresponding changes to the documentation + +### Disclaimer + +By sending us your contributions, you are agreeing that your contribution is made subject to the terms of our [Contributor Ownership Statement](https://github.com/Farfetch/.github/blob/master/COS.md) diff --git a/.gitignore b/.gitignore index fd7ef65..76c0cfa 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,14 @@ +### DotnetCore ### +# .NET Core build folders +/bin +/obj + + +### VisualStudio ### ## Ignore Visual Studio temporary files, build results, and ## files generated by popular Visual Studio add-ons. 
## -## Get latest from https://github.com/github/gitignore/blob/main/VisualStudio.gitignore +## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore # User-specific files *.rsuser @@ -23,14 +30,12 @@ mono_crash.* [Rr]eleases/ x64/ x86/ -[Ww][Ii][Nn]32/ [Aa][Rr][Mm]/ [Aa][Rr][Mm]64/ bld/ [Bb]in/ [Oo]bj/ [Ll]og/ -[Ll]ogs/ # Visual Studio 2015/2017 cache/options directory .vs/ @@ -62,18 +67,15 @@ project.lock.json project.fragment.lock.json artifacts/ -# ASP.NET Scaffolding -ScaffoldingReadMe.txt - # StyleCop StyleCopReport.xml +StyleCop.Cache # Files built by Visual Studio *_i.c *_p.c *_h.h *.ilk -*.meta *.obj *.iobj *.pch @@ -90,7 +92,6 @@ StyleCopReport.xml *.tmp_proj *_wpftmp.csproj *.log -*.tlog *.vspscc *.vssscc .builds @@ -132,6 +133,9 @@ _ReSharper*/ *.[Rr]e[Ss]harper *.DotSettings.user +# JustCode is a .NET coding add-in +.JustCode + # TeamCity is a build add-in _TeamCity* @@ -142,11 +146,6 @@ _TeamCity* .axoCover/* !.axoCover/settings.json -# Coverlet is a free, cross platform Code Coverage Tool -coverage*.json -coverage*.xml -coverage*.info - # Visual Studio code coverage results *.coverage *.coveragexml @@ -294,17 +293,6 @@ node_modules/ # Visual Studio 6 auto-generated workspace file (contains which files were open etc.) *.vbw -# Visual Studio 6 auto-generated project file (contains which files were open etc.) -*.vbp - -# Visual Studio 6 workspace and project file (working project files containing files to include in project) -*.dsw -*.dsp - -# Visual Studio 6 technical files -*.ncb -*.aps - # Visual Studio LightSwitch build output **/*.HTMLClient/GeneratedArtifacts **/*.DesktopClient/GeneratedArtifacts @@ -361,41 +349,33 @@ ASALocalRun/ # Local History for Visual Studio .localhistory/ -# Visual Studio History (VSHistory) files -.vshistory/ - # BeatPulse healthcheck temp database healthchecksdb # Backup folder for Package Reference Convert tool in Visual Studio 2017 MigrationBackup/ -# Ionide (cross platform F# VS Code tools) working folder -.ionide/ - -# Fody - auto-generated XML schema -FodyWeavers.xsd +.vscode/launch.json +.vscode/tasks.json +**/swagger.*.json -# VS Code files for those working on multiple tools -.vscode/* -!.vscode/settings.json -!.vscode/tasks.json -!.vscode/launch.json -!.vscode/extensions.json -*.code-workspace +# JetBrains Rider +.idea/ +*.sln.iml -# Local History for Visual Studio Code -.history/ +#Env file +.env -# Windows Installer files from build outputs -*.cab -*.msi -*.msix -*.msm -*.msp +# Test reports +reports/* +results.jtl # JetBrains Rider *.sln.iml # macOS -.DS_Store \ No newline at end of file +.DS_Store + +# Sonarlint +.sonarlint/ + diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..42eb4ea --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,17 @@ +### Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## [1.0] + +### Added + +- Added Adaptative Concurrency Limiter +- Added request prioritization support +- Added endpoint request prioritization attribute +- Implement Concurrency Adaptative Limiter metrics +- Added Task Execution Time metrics and API refactoring +- Add support to enable/disable metrics \ No newline at end of file diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..9952a11 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,73 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +- The use of sexualized language or imagery and unwelcome sexual attention or advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or electronic address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at . All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see + \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..26e871b --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,46 @@ +# Contributing + +Hi there! We're thrilled that you'd like to contribute to this project. Your help is essential for keeping it great. + +Please note that this project is released with a [Contributor Code of Conduct][code-of-conduct]. By participating in this project you agree to abide by its terms. + +## Submitting a pull request + +### Before opening a Pull Request + +We recommend [opening an issue](https://github.com/Farfetch/loadshedding/issues) before a substantial Pull Request if there isn’t [already an issue](https://github.com/Farfetch/loadshedding/issues) for what you’d like to contribute. This helps facilitate a discussion before deciding on an implementation approach. + +For some changes, such as typo fixes, documentation enhancements, or broken links, it may be suitable to open a small Pull Request by itself. + +### How to open a Pull Request + +1. Check the issues or open a new one +2. Fork this repository +3. Create your feature branch: `git checkout -b my-new-feature` +4. Commit your changes: `git commit -am 'feat: Add some feature'` +5. Push to the branch: `git push origin my-new-feature` +6. Submit a pull request linked to the issue 1. + +Here are a few things you can do that will increase the likelihood of your pull request being accepted: + +- Follow the overall style of the project +- Write tests +- Keep your change as focused as possible. If there are multiple changes you would like to make that are not dependent upon each other, submit them as separate pull requests +- Write [good commit messages](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html) following [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/) +- Open a pull request with a title following [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/) + +## Running Integration Tests + +You can find a Makefile with steps for running using docker on the repository root. + +## Resources + +- [How to Contribute to Open Source](https://opensource.guide/how-to-contribute/) +- [Using Pull Requests](https://help.github.com/articles/about-pull-requests/) +- [GitHub Help](https://help.github.com) + +### Disclaimer + +By sending us your contributions, you are agreeing that your contribution is made subject to the terms of our [Contributor Ownership Statement](https://github.com/Farfetch/.github/blob/master/COS.md) + +[code-of-conduct]: CODE_OF_CONDUCT.md \ No newline at end of file diff --git a/LICENSE b/LICENSE index caf2787..b0d1f2c 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2023 FARFETCH +Copyright (c) 2024 FARFETCH Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -18,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +SOFTWARE. 
\ No newline at end of file diff --git a/LoadShedding.sln b/LoadShedding.sln new file mode 100644 index 0000000..1d410eb --- /dev/null +++ b/LoadShedding.sln @@ -0,0 +1,112 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.4.33110.190 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{CDACCBBE-1F18-4DAE-A196-F5DE80EF8BEE}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "tests", "tests", "{693C5879-B401-4398-BAA1-7FAF8B610BDD}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "samples", "samples", "{BB0DD68C-4E4B-46DE-86B0-BA365448F9A2}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "unit", "unit", "{E149B487-146C-4AA4-BFC9-4A42FBEA3E05}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Samples.WebApi", "samples\Samples.WebApi\Samples.WebApi.csproj", "{8853CFF3-987F-469A-9D3D-24C875B7DFB4}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "integration", "integration", "{6143CC7F-F882-4A90-9CAB-E11597429CC4}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "benchmark", "benchmark", "{D91E6E3A-4165-4934-9462-E62B04250932}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "performance", "performance", "{A9CDBF8F-C28F-48CC-A01F-6BE76A3DD5A2}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Farfetch.LoadShedding", "src\Farfetch.LoadShedding\Farfetch.LoadShedding.csproj", "{59E73202-B2B1-4098-94B3-348C853C6583}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Farfetch.LoadShedding.AspNetCore", "src\Farfetch.LoadShedding.AspNetCore\Farfetch.LoadShedding.AspNetCore.csproj", "{95D50DC0-3988-4C6A-BCD4-0730C63978ED}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Farfetch.LoadShedding.Prometheus", "src\Farfetch.LoadShedding.Prometheus\Farfetch.LoadShedding.Prometheus.csproj", "{CBE1E761-9C83-41D4-99FF-7BF84DC569C5}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Farfetch.LoadShedding.BenchmarkTests", "tests\benchmark\Farfetch.LoadShedding.BenchmarkTests\Farfetch.LoadShedding.BenchmarkTests.csproj", "{2FE97B8B-EADA-4C91-A0B4-437252779982}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Farfetch.LoadShedding.IntegrationTests", "tests\integration-tests\Farfetch.LoadShedding.IntegrationTests\Farfetch.LoadShedding.IntegrationTests.csproj", "{4C07C337-E40E-4BD6-8F48-47072B07716B}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Farfetch.LoadShedding.PerformanceTests", "tests\performance-tests\Farfetch.LoadShedding.PerformanceTests\Farfetch.LoadShedding.PerformanceTests.csproj", "{A7D2B85A-AF4C-4CD2-92AA-99F2933A570D}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Farfetch.LoadShedding.AspNetCore.Tests", "tests\unit-tests\Farfetch.LoadShedding.AspNetCore.Tests\Farfetch.LoadShedding.AspNetCore.Tests.csproj", "{3CC2B32C-6931-4E2E-96B6-C7E0480A9C08}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Farfetch.LoadShedding.Tests", "tests\unit-tests\Farfetch.LoadShedding.Tests\Farfetch.LoadShedding.Tests.csproj", "{0BD82D67-CE42-4EB8-AF58-703AB143DF05}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{5B0CD1D6-DFFB-47B0-9429-DC58F22ABC50}" + ProjectSection(SolutionItems) = preProject + .editorconfig = .editorconfig + docker-compose.sample.yaml = docker-compose.sample.yaml 
+ docker-compose.yaml = docker-compose.yaml + deploy\docker\Dockerfile = deploy\docker\Dockerfile + Makefile = Makefile + deploy\docker\Sample.Dockerfile = deploy\docker\Sample.Dockerfile + EndProjectSection +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {8853CFF3-987F-469A-9D3D-24C875B7DFB4}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {8853CFF3-987F-469A-9D3D-24C875B7DFB4}.Debug|Any CPU.Build.0 = Debug|Any CPU + {8853CFF3-987F-469A-9D3D-24C875B7DFB4}.Release|Any CPU.ActiveCfg = Release|Any CPU + {8853CFF3-987F-469A-9D3D-24C875B7DFB4}.Release|Any CPU.Build.0 = Release|Any CPU + {59E73202-B2B1-4098-94B3-348C853C6583}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {59E73202-B2B1-4098-94B3-348C853C6583}.Debug|Any CPU.Build.0 = Debug|Any CPU + {59E73202-B2B1-4098-94B3-348C853C6583}.Release|Any CPU.ActiveCfg = Release|Any CPU + {59E73202-B2B1-4098-94B3-348C853C6583}.Release|Any CPU.Build.0 = Release|Any CPU + {95D50DC0-3988-4C6A-BCD4-0730C63978ED}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {95D50DC0-3988-4C6A-BCD4-0730C63978ED}.Debug|Any CPU.Build.0 = Debug|Any CPU + {95D50DC0-3988-4C6A-BCD4-0730C63978ED}.Release|Any CPU.ActiveCfg = Release|Any CPU + {95D50DC0-3988-4C6A-BCD4-0730C63978ED}.Release|Any CPU.Build.0 = Release|Any CPU + {CBE1E761-9C83-41D4-99FF-7BF84DC569C5}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {CBE1E761-9C83-41D4-99FF-7BF84DC569C5}.Debug|Any CPU.Build.0 = Debug|Any CPU + {CBE1E761-9C83-41D4-99FF-7BF84DC569C5}.Release|Any CPU.ActiveCfg = Release|Any CPU + {CBE1E761-9C83-41D4-99FF-7BF84DC569C5}.Release|Any CPU.Build.0 = Release|Any CPU + {2FE97B8B-EADA-4C91-A0B4-437252779982}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {2FE97B8B-EADA-4C91-A0B4-437252779982}.Debug|Any CPU.Build.0 = Debug|Any CPU + {2FE97B8B-EADA-4C91-A0B4-437252779982}.Release|Any CPU.ActiveCfg = Release|Any CPU + {2FE97B8B-EADA-4C91-A0B4-437252779982}.Release|Any CPU.Build.0 = Release|Any CPU + {4C07C337-E40E-4BD6-8F48-47072B07716B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {4C07C337-E40E-4BD6-8F48-47072B07716B}.Debug|Any CPU.Build.0 = Debug|Any CPU + {4C07C337-E40E-4BD6-8F48-47072B07716B}.Release|Any CPU.ActiveCfg = Release|Any CPU + {4C07C337-E40E-4BD6-8F48-47072B07716B}.Release|Any CPU.Build.0 = Release|Any CPU + {A7D2B85A-AF4C-4CD2-92AA-99F2933A570D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {A7D2B85A-AF4C-4CD2-92AA-99F2933A570D}.Debug|Any CPU.Build.0 = Debug|Any CPU + {A7D2B85A-AF4C-4CD2-92AA-99F2933A570D}.Release|Any CPU.ActiveCfg = Release|Any CPU + {A7D2B85A-AF4C-4CD2-92AA-99F2933A570D}.Release|Any CPU.Build.0 = Release|Any CPU + {3CC2B32C-6931-4E2E-96B6-C7E0480A9C08}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {3CC2B32C-6931-4E2E-96B6-C7E0480A9C08}.Debug|Any CPU.Build.0 = Debug|Any CPU + {3CC2B32C-6931-4E2E-96B6-C7E0480A9C08}.Release|Any CPU.ActiveCfg = Release|Any CPU + {3CC2B32C-6931-4E2E-96B6-C7E0480A9C08}.Release|Any CPU.Build.0 = Release|Any CPU + {0BD82D67-CE42-4EB8-AF58-703AB143DF05}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {0BD82D67-CE42-4EB8-AF58-703AB143DF05}.Debug|Any CPU.Build.0 = Debug|Any CPU + {0BD82D67-CE42-4EB8-AF58-703AB143DF05}.Release|Any CPU.ActiveCfg = Release|Any CPU + {0BD82D67-CE42-4EB8-AF58-703AB143DF05}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(NestedProjects) = preSolution + 
{E149B487-146C-4AA4-BFC9-4A42FBEA3E05} = {693C5879-B401-4398-BAA1-7FAF8B610BDD} + {8853CFF3-987F-469A-9D3D-24C875B7DFB4} = {BB0DD68C-4E4B-46DE-86B0-BA365448F9A2} + {6143CC7F-F882-4A90-9CAB-E11597429CC4} = {693C5879-B401-4398-BAA1-7FAF8B610BDD} + {D91E6E3A-4165-4934-9462-E62B04250932} = {693C5879-B401-4398-BAA1-7FAF8B610BDD} + {A9CDBF8F-C28F-48CC-A01F-6BE76A3DD5A2} = {693C5879-B401-4398-BAA1-7FAF8B610BDD} + {59E73202-B2B1-4098-94B3-348C853C6583} = {CDACCBBE-1F18-4DAE-A196-F5DE80EF8BEE} + {95D50DC0-3988-4C6A-BCD4-0730C63978ED} = {CDACCBBE-1F18-4DAE-A196-F5DE80EF8BEE} + {CBE1E761-9C83-41D4-99FF-7BF84DC569C5} = {CDACCBBE-1F18-4DAE-A196-F5DE80EF8BEE} + {2FE97B8B-EADA-4C91-A0B4-437252779982} = {D91E6E3A-4165-4934-9462-E62B04250932} + {4C07C337-E40E-4BD6-8F48-47072B07716B} = {6143CC7F-F882-4A90-9CAB-E11597429CC4} + {A7D2B85A-AF4C-4CD2-92AA-99F2933A570D} = {A9CDBF8F-C28F-48CC-A01F-6BE76A3DD5A2} + {3CC2B32C-6931-4E2E-96B6-C7E0480A9C08} = {E149B487-146C-4AA4-BFC9-4A42FBEA3E05} + {0BD82D67-CE42-4EB8-AF58-703AB143DF05} = {E149B487-146C-4AA4-BFC9-4A42FBEA3E05} + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {4CAA1372-4CC5-492D-8CB3-3D426B8C4E6A} + EndGlobalSection +EndGlobal diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..861e1d6 --- /dev/null +++ b/Makefile @@ -0,0 +1,26 @@ +.PHONY: build unit-tests integration-tests performance-tests clean sample sample-clean + +build: + @docker compose up --build --no-start + +unit-tests: build + @docker compose up --abort-on-container-exit --exit-code-from unit-tests unit-tests + +integration-tests: build + @docker compose up --abort-on-container-exit --exit-code-from integration-tests integration-tests + +performance-tests: build + @docker compose up -d performance-tests-sample + @docker compose up -d performance-tests-sample-no-limiter + + @docker compose up --abort-on-container-exit --exit-code-from performance-tests performance-tests + +clean: + @docker compose down + @docker compose -f docker-compose.sample.yaml down + +sample: + @docker compose -f docker-compose.sample.yaml up -d + +sample-clean: + @docker compose -f docker-compose.sample.yaml down diff --git a/README.md b/README.md index df42a43..e285715 100644 --- a/README.md +++ b/README.md @@ -1 +1,125 @@ -# loadshedding \ No newline at end of file +# LoadShedding + +This library provides a set of capabilities to enable the service to deal with requests overload and resource limits to avoid outages and to ensure the best usage of the service capacity. + +## Documentation + +
+
+### Getting Started
+
+- [Installation](docs/getting_started/installation.md)
+- Register Limits
+  - [Concurrency Adaptative Limiter](docs/getting_started/register_concurrency_adaptative_limiter.md)
+- [Samples](samples)
+
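+As a quick orientation, the sketch below shows how a concurrency limiter of this kind is usually wired into an ASP.NET Core application. It is only an illustrative sketch: the extension-method names used here (`AddLoadShedding`, `UseLoadShedding`) are assumptions rather than this library's confirmed API, so follow the installation and registration guides linked above for the actual usage.
+
+```csharp
+// Illustrative sketch only: AddLoadShedding/UseLoadShedding are assumed names,
+// not confirmed API of this repository; see docs/getting_started for the real API.
+using Microsoft.AspNetCore.Builder;
+using Microsoft.Extensions.DependencyInjection;
+
+var builder = WebApplication.CreateBuilder(args);
+
+// Hypothetical registration of the adaptative concurrency limiter services.
+builder.Services.AddLoadShedding();
+
+var app = builder.Build();
+
+// Hypothetical middleware that queues or rejects requests once the adaptative
+// concurrency limit is reached, instead of letting the service become overloaded.
+app.UseLoadShedding();
+
+app.MapGet("/", () => "Hello from a load-shedded endpoint");
+
+app.Run();
+```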
+
+## Samples
+
+For sample projects showcasing the application types, dependencies, and features, please check the [samples](samples/) folder.
+
+## Contributing
+
+Read our [contributing guidelines](CONTRIBUTING.md) to learn about our development process, how to propose bugfixes and improvements, and how to build and test your changes.
+
+### Requirements
+
+- Makefile
+- .NET 6
+- Docker
+- Docker Compose
+
+#### Makefile - Simplified test and build commands
+
+**Important: You must have Make installed and configured in your local environment.**
+
+The Makefile at the root of the LoadShedding repository gives contributors a simplified way to build the project and run the tests. The commands below wrap the same commands used in the pipeline.
+
+Note: Run these commands from the root folder of the repository.
+
+### Building
+
+To build the solution, simply use this command.
+
+```bash
+make build
+```
+
+### Testing (Unit)
+
+To run all the unit tests, simply use this command.
+
+```bash
+make unit-tests
+```
+
+The command builds the solution and then runs all the tests marked as unit tests.
+
+### Testing (Integration)
+
+To run all the integration tests, simply use this command.
+
+```bash
+make integration-tests
+```
+
+The command builds the solution and then runs all the integration tests.
+
+### Benchmark
+
+Below is a benchmark analysis of the concurrency control mechanism. Multiple scenarios were created for this test:
+
+- **Limiter_Default:** Directly tests the AdaptiveConcurrencyLimiter with the default priority;
+- **Limiter_RandomPriority:** Directly tests the AdaptiveConcurrencyLimiter with random priorities;
+- **LimiterMiddleware_Default:** Tests the ConcurrencyLimiterMiddleware, which uses the AdaptiveConcurrencyLimiter in an HTTP request context, with the default priority;
+- **LimiterMiddleware_RandomPriority:** Tests the ConcurrencyLimiterMiddleware, which uses the AdaptiveConcurrencyLimiter in an HTTP request context, with random priorities;
+ +- **TaskQueueWith1000Items_EnqueueFixedPriority:** Tests the TaskQueue.Enqueue pre-loaded with 1000 items and a default priority; +- **TaskQueueEmpty_EnqueueRandomPriority:** Tests the TaskQueue.Enqueue with no elements; +- **TaskQueueWith1000Items_EnqueueRandomPriority:** Tests the TaskQueue.Enqueue pre-loaded with 1000 items and random priorities; +- **TaskQueueWith1000Items_Dequeue:** Tests the TaskQueue.Dequeue pre-loaded with 1000 items; +- **TaskQueue_EnqueueNewItem_LimitReached:** Tests the TaskQueue.Enqueue pre-loaded with 1000 items and the queue limit reached; + +#### Limiter + +``` ini +BenchmarkDotNet=v0.13.4, OS=Windows 10 (10.0.19044.2604/21H2/November2021Update) +Intel Core i7-10610U CPU 1.80GHz, 1 CPU, 8 logical and 4 physical cores +.NET SDK=7.0.103 + [Host] : .NET 6.0.14 (6.0.1423.7309), X64 RyuJIT AVX2 + Job-VLMTWN : .NET 6.0.14 (6.0.1423.7309), X64 RyuJIT AVX2 + +IterationCount=10 +``` + +| Method | Mean | Error | StdDev | Min | Max | Rank | Completed Work Items | Lock Contentions | Gen0 | Allocated | +|--------------------------------- |---------:|---------:|---------:|---------:|---------:|-----:|---------------------:|-----------------:|-------:|----------:| +| Limiter_Default | 354.4 ns | 6.22 ns | 3.25 ns | 349.2 ns | 358.4 ns | 1 | 0.0000 | - | 0.1450 | 608 B | +| Limiter_RandomPriority | 366.3 ns | 4.82 ns | 2.52 ns | 363.6 ns | 369.6 ns | 2 | 0.0000 | - | 0.1450 | 608 B | +| LimiterMiddleware_Default | 436.6 ns | 28.55 ns | 16.99 ns | 416.7 ns | 471.4 ns | 3 | 0.0000 | - | 0.1855 | 776 B | +| LimiterMiddleware_RandomPriority | 468.7 ns | 6.55 ns | 3.90 ns | 463.7 ns | 475.3 ns | 4 | 0.0000 | - | 0.2027 | 848 B | + +##### TaskQueue + +``` ini +BenchmarkDotNet=v0.13.4, OS=Windows 10 (10.0.19044.2604/21H2/November2021Update) +Intel Core i7-10610U CPU 1.80GHz, 1 CPU, 8 logical and 4 physical cores +.NET SDK=7.0.103 + [Host] : .NET 6.0.14 (6.0.1423.7309), X64 RyuJIT AVX2 + Job-THBOTE : .NET 6.0.14 (6.0.1423.7309), X64 RyuJIT AVX2 + +InvocationCount=1 IterationCount=10 UnrollFactor=1 + +``` + +| Method | Mean | Error | StdDev | Min | Max | Rank | Completed Work Items | Lock Contentions | Allocated | +|--------------------------------------------- |----------:|----------:|----------:|---------:|----------:|-----:|---------------------:|-----------------:|----------:| +| TaskQueueWith1000Items_EnqueueFixedPriority | 11.311 μs | 3.8278 μs | 2.2779 μs | 6.800 μs | 14.800 μs | 4 | - | - | 896 B | +| TaskQueueEmpty_EnqueueRandomPriority | 6.700 μs | 3.8089 μs | 2.2666 μs | 4.800 μs | 11.800 μs | 2 | - | - | 896 B | +| TaskQueueWith1000Items_EnqueueRandomPriority | 3.650 μs | 0.3540 μs | 0.1852 μs | 3.400 μs | 4.000 μs | 1 | - | - | 896 B | +| TaskQueueWith1000Items_Dequeue | 5.500 μs | 1.0912 μs | 0.5707 μs | 4.600 μs | 6.400 μs | 2 | - | - | 704 B | +| TaskQueue_EnqueueNewItem_LimitReached | 8.111 μs | 2.2848 μs | 1.3596 μs | 7.000 μs | 10.500 μs | 3 | - | - | 1144 B | + +#### Conclusion + +In all the scenarios the time added to the execution pipeline is very small and the impact caused by the limiter and task queue can be ignored. 
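+
+If you want a quick, standalone way to reproduce this kind of overhead measurement outside the repository, the harness below is a minimal BenchmarkDotNet sketch. It uses a `SemaphoreSlim` as a hypothetical stand-in for the AdaptiveConcurrencyLimiter, since the repository's real benchmark code lives in its benchmark test project; treat the numbers it produces as indicative only.
+
+```csharp
+// Minimal BenchmarkDotNet harness (sketch). The SemaphoreSlim is only a stand-in for
+// the AdaptiveConcurrencyLimiter; it demonstrates how limiter overhead can be measured.
+using System.Threading;
+using System.Threading.Tasks;
+using BenchmarkDotNet.Attributes;
+using BenchmarkDotNet.Running;
+
+[MemoryDiagnoser]
+public class LimiterOverheadBenchmark
+{
+    // Fixed-size semaphore standing in for the adaptive limiter.
+    private readonly SemaphoreSlim _limiter = new(initialCount: 100, maxCount: 100);
+
+    [Benchmark]
+    public async Task Limiter_Default()
+    {
+        await _limiter.WaitAsync();
+        try
+        {
+            // The protected work is empty on purpose, so only the limiter's
+            // own acquire/release overhead is measured.
+            await Task.CompletedTask;
+        }
+        finally
+        {
+            _limiter.Release();
+        }
+    }
+}
+
+public static class Program
+{
+    public static void Main() => BenchmarkRunner.Run<LimiterOverheadBenchmark>();
+}
+```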
diff --git a/data/grafana/dashboards/http_loadshedding.json b/data/grafana/dashboards/http_loadshedding.json new file mode 100644 index 0000000..49f3475 --- /dev/null +++ b/data/grafana/dashboards/http_loadshedding.json @@ -0,0 +1,1987 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "$source", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "HTTP Load Metrics", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "datasource": "$source", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 8, + "panels": [], + "targets": [ + { + "datasource": "$source", + "refId": "A" + } + ], + "title": "General", + "type": "row" + }, + { + "datasource": "$source", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 1, + "min": 0, + "noValue": "0", + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "dark-green", + "value": 0 + }, + { + "color": "#EAB839", + "value": 50 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 0, + "y": 1 + }, + "id": 55, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "9.3.6", + "targets": [ + { + "datasource": "$source", + "editorMode": "code", + "exemplar": false, + "expr": "(sum(http_requests_concurrency_items_total{job=\"$application\", instance=~\"$instance\"}) / sum(http_requests_concurrency_limit_total{job=\"$application\", instance=~\"$instance\"}))", + "instant": true, + "legendFormat": "Concurrency Usage", + "range": false, + "refId": "A" + } + ], + "title": "Concurrency Usage", + "type": "gauge" + }, + { + "datasource": "$source", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 1, + "min": 0, + "noValue": "0", + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "dark-green", + "value": 0 + }, + { + "color": "#EAB839", + "value": 50 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 3, + "y": 1 + }, + "id": 59, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "9.3.6", + "targets": [ + { + "datasource": "$source", + "editorMode": "code", + "exemplar": false, + "expr": "(sum(http_requests_queue_items_total{job=\"$application\", instance=~\"$instance\"}) / sum(http_requests_queue_limit_total{job=\"$application\", instance=~\"$instance\"}))", + "instant": true, + "legendFormat": "Concurrency Usage", + "range": false, + "refId": "A" + } + ], + "title": "Queue Usage", + "type": "gauge" + }, + { + "datasource": "$source", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "min": 0, + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "reqps" + }, + "overrides": [] + }, 
+ "gridPos": { + "h": 6, + "w": 3, + "x": 6, + "y": 1 + }, + "id": 54, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "9.3.6", + "targets": [ + { + "datasource": "$source", + "editorMode": "code", + "exemplar": false, + "expr": "sum(rate(http_requests_task_processing_time_seconds_count{job=\"$application\", instance=~\"$instance\"}[$__rate_interval]))", + "instant": true, + "legendFormat": "Concurrency Usage", + "range": false, + "refId": "A" + } + ], + "title": "Task Throughput", + "type": "stat" + }, + { + "datasource": "$source", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "green", + "mode": "fixed" + }, + "mappings": [], + "min": 0, + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 9, + "y": 1 + }, + "id": 60, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "value" + }, + "pluginVersion": "9.3.6", + "targets": [ + { + "datasource": "$source", + "editorMode": "code", + "exemplar": false, + "expr": "(sum(rate(http_requests_task_processing_time_seconds_sum{job=\"$application\", instance=~\"$instance\"}[$__rate_interval])) / (sum(rate(http_requests_task_processing_time_seconds_count{job=\"$application\", instance=~\"$instance\"}[$__rate_interval])))) > 0", + "instant": true, + "legendFormat": "Concurrency Usage", + "range": false, + "refId": "A" + } + ], + "title": "Task Latency (Avg)", + "type": "stat" + }, + { + "datasource": "$source", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "min": 0, + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + }, + "unit": "reqps" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 12, + "y": 1 + }, + "id": 61, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "9.3.6", + "targets": [ + { + "datasource": "$source", + "editorMode": "code", + "exemplar": false, + "expr": "sum(rate(http_requests_rejected_total{job=\"$application\", instance=~\"$instance\"}[$__rate_interval]))", + "instant": true, + "legendFormat": "Concurrency Usage", + "range": false, + "refId": "A" + } + ], + "title": "Rejection Rate", + "transformations": [ + { + "id": "convertFieldType", + "options": {} + } + ], + "type": "stat" + }, + { + "datasource": "$source", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 16, + "y": 1 + }, + "id": 47, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { 
+ "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.3.6", + "targets": [ + { + "datasource": "$source", + "editorMode": "code", + "exemplar": false, + "expr": "sum(http_requests_concurrency_limit_total{job=\"$application\", instance=~\"$instance\"})", + "instant": true, + "legendFormat": "Concurrency Usage", + "range": false, + "refId": "A" + } + ], + "title": "Concurrency Limit", + "type": "stat" + }, + { + "datasource": "$source", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 20, + "y": 1 + }, + "id": 48, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.3.6", + "targets": [ + { + "datasource": "$source", + "editorMode": "code", + "exemplar": false, + "expr": "sum(http_requests_queue_limit_total{job=\"$application\", instance=~\"$instance\"})", + "instant": true, + "legendFormat": "Concurrency Usage", + "range": false, + "refId": "A" + } + ], + "title": "Queue Limit", + "type": "stat" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 7 + }, + "id": 44, + "panels": [], + "title": "Details", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$source", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 6, + "x": 0, + "y": 8 + }, + "hiddenSeries": false, + "id": 37, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "9.3.6", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": "$source", + "editorMode": "code", + "exemplar": true, + "expr": "sum(http_requests_concurrency_items_total{job=\"$application\", instance=~\"$instance\"}) by (job, priority)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Priority: {{priority}}", + "range": true, + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Concurrency Usage / Priority", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transformations": [ + { + "id": "convertFieldType", + "options": {} + } + ], + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:703", + "format": "short", + "label": "", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:704", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$source", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 6, + "x": 6, + "y": 8 + }, + "hiddenSeries": 
false, + "id": 38, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "9.3.6", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": "$source", + "editorMode": "code", + "exemplar": true, + "expr": "sum(http_requests_queue_items_total{job=\"$application\", instance=~\"$instance\"}) by (job,priority)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Priority: {{priority}}", + "range": true, + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Queue Usage / Priority", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "min": "0", + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$source", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 6, + "x": 12, + "y": 8 + }, + "hiddenSeries": false, + "id": 31, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "9.3.6", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": "$source", + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(http_requests_task_processing_time_seconds_count{job=\"$application\", instance=~\"$instance\"}[$__rate_interval])) by (method, priority)", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{method}} {{priority}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Task Execution / Second (Success)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transformations": [ + { + "id": "convertFieldType", + "options": {} + } + ], + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "reqps", + "logBase": 1, + "min": "0", + "show": true + }, + { + "format": "short", + "logBase": 1, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$source", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 6, + "x": 18, + "y": 8 + }, + "hiddenSeries": false, + "id": 58, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + 
"paceLength": 10, + "percentage": false, + "pluginVersion": "9.3.6", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": "$source", + "editorMode": "code", + "exemplar": true, + "expr": "(sum(http_requests_concurrency_items_total{job=\"$application\", instance=~\"$instance\"}) by (job, instance)) / sum(http_requests_concurrency_limit_total{job=\"$application\", instance=~\"$instance\"}) by (job, instance)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "range": true, + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Concurrency Usage / Instance", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": "", + "logBase": 1, + "min": "0", + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$source", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 6, + "x": 0, + "y": 17 + }, + "hiddenSeries": false, + "id": 57, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "9.3.6", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": "$source", + "editorMode": "code", + "exemplar": true, + "expr": "sum(http_requests_concurrency_limit_total{job=\"$application\", instance=~\"$instance\"}) by (job)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Limit", + "range": true, + "refId": "A" + }, + { + "datasource": { + "uid": "$source" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(http_requests_concurrency_items_total{job=\"$application\", instance=~\"$instance\"}) by (job)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Usage", + "range": true, + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Concurrency Usage Total", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$source", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 6, + "x": 6, + "y": 17 + }, + "hiddenSeries": false, + "id": 30, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + 
"pluginVersion": "9.3.6", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": "$source", + "editorMode": "code", + "exemplar": true, + "expr": "sum(http_requests_queue_limit_total{job=\"$application\", instance=~\"$instance\"}) by (job)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Limit", + "range": true, + "refId": "A" + }, + { + "datasource": { + "uid": "$source" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(http_requests_queue_items_total{job=\"$application\", instance=~\"$instance\"}) by (job)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Usage", + "range": true, + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Queue Usage Total", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$source", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 6, + "x": 12, + "y": 17 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "9.3.6", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": "$source", + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(http_requests_rejected_total{job=\"$application\", instance=~\"$instance\"}[$__rate_interval])) by (priority, method)", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{method}} - {{priority}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Rejection Rate / Priority", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transformations": [ + { + "id": "convertFieldType", + "options": {} + } + ], + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "reqps", + "logBase": 1, + "min": "0", + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$source", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 6, + "x": 18, + "y": 17 + }, + "hiddenSeries": false, + "id": 32, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "9.3.6", + "pointradius": 5, + "points": false, + "renderer": 
"flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": "$source", + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(process_cpu_seconds_total{job=\"$application\", instance=~\"$instance\"}[30s]) * 100) by (instance)", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "CPU", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 26 + }, + "id": 42, + "panels": [], + "title": "Latencies", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$source", + "fieldConfig": { + "defaults": { + "unit": "s" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 4, + "x": 0, + "y": 27 + }, + "hiddenSeries": false, + "id": 62, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "hideEmpty": false, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "9.3.6", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": "$source", + "editorMode": "code", + "exemplar": true, + "expr": "histogram_quantile(0.5, sum(rate(http_requests_task_processing_time_seconds_bucket{job=\"$application\", instance=~\"$instance\"}[$__rate_interval])) by (le, priority))", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{method}} {{priority}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Task Execution Time - P50", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "logBase": 1, + "min": "0", + "show": true + }, + { + "format": "short", + "label": "", + "logBase": 1, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$source", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 4, + "x": 4, + "y": 27 + }, + "hiddenSeries": false, + "id": 40, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + 
"pluginVersion": "9.3.6", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": "$source", + "editorMode": "code", + "exemplar": true, + "expr": "histogram_quantile(0.90, sum(rate(http_requests_task_processing_time_seconds_bucket{job=\"$application\", instance=~\"$instance\"}[$__rate_interval])) by (le, priority))", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{priority}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Task Execution Time - P90", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "logBase": 1, + "min": "0", + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$source", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 4, + "x": 8, + "y": 27 + }, + "hiddenSeries": false, + "id": 45, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "9.3.6", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": "$source", + "editorMode": "code", + "exemplar": true, + "expr": "histogram_quantile(0.99, sum(rate(http_requests_task_processing_time_seconds_bucket{job=\"$application\", instance=~\"$instance\"}[$__rate_interval])) by (le, priority))", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{method}} {{priority}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Task Execution Time - P99", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transformations": [ + { + "id": "convertFieldType", + "options": {} + } + ], + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "logBase": 1, + "min": "0", + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$source", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 4, + "x": 12, + "y": 27 + }, + "hiddenSeries": false, + "id": 35, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "9.3.6", + "pointradius": 5, + 
"points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "uid": "$source" + }, + "editorMode": "code", + "expr": "histogram_quantile(0.5, sum(rate(http_requests_queue_time_seconds_bucket{job=\"$application\", instance=~\"$instance\"}[$__rate_interval])) by (le, priority))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{priority}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Queue Time P50", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transformations": [ + { + "id": "convertFieldType", + "options": {} + } + ], + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "logBase": 1, + "min": "0", + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$source", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 4, + "x": 16, + "y": 27 + }, + "hiddenSeries": false, + "id": 63, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "9.3.6", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "uid": "$source" + }, + "editorMode": "code", + "expr": "histogram_quantile(0.90, sum(rate(http_requests_queue_time_seconds_bucket{job=\"$application\", instance=~\"$instance\"}[$__rate_interval])) by (le, priority))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{priority}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Queue Time P90", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transformations": [ + { + "id": "convertFieldType", + "options": {} + } + ], + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "logBase": 1, + "min": "0", + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$source", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 4, + "x": 20, + "y": 27 + }, + "hiddenSeries": false, + "id": 39, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "9.3.6", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "uid": "$source" + }, + "editorMode": "code", + "expr": "histogram_quantile(0.99, 
sum(rate(http_requests_queue_time_seconds_bucket{job=\"$application\", instance=~\"$instance\"}[$__rate_interval])) by (le, priority))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Priority {{priority}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Queue Time P99", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transformations": [ + { + "id": "convertFieldType", + "options": {} + } + ], + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "logBase": 1, + "min": "0", + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + } + ], + "refresh": "1m", + "schemaVersion": 37, + "style": "dark", + "tags": [ + "kubernetes" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "LoadSheddingMetrics", + "value": "LoadSheddingMetrics" + }, + "hide": 0, + "includeAll": false, + "multi": false, + "name": "source", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "current": { + "selected": false, + "text": "", + "value": "" + }, + "datasource": "$source", + "definition": "label_values(http_requests_concurrency_limit_total,job)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "application", + "options": [], + "query": { + "query": "label_values(http_requests_concurrency_limit_total,job)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "current": { + "selected": true, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": "$source", + "definition": "label_values(http_requests_concurrency_limit_total,instance)", + "hide": 0, + "includeAll": true, + "multi": true, + "name": "instance", + "options": [], + "query": { + "query": "label_values(http_requests_concurrency_limit_total,instance)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "utc", + "title": "http_loadshedding", + "uid": "http_loadshedding", + "version": 1, + "weekStart": "" +} \ No newline at end of file diff --git a/data/grafana/provisioning/dashboards/all.yml b/data/grafana/provisioning/dashboards/all.yml new file mode 100644 index 0000000..210347f --- /dev/null +++ b/data/grafana/provisioning/dashboards/all.yml @@ -0,0 +1,6 @@ +- name: 'default' # name of this dashboard configuration (not dashboard itself) + org_id: 1 # id of the org to hold the dashboard + folder: '' # name of the folder to put the dashboard (http://docs.grafana.org/v5.0/reference/dashboard_folders/) + type: 'file' # type of dashboard description (json files) + options: + folder: '/var/lib/grafana/dashboards' # where dashboards are \ No newline at end of file diff --git a/data/grafana/provisioning/datasources/all.yml b/data/grafana/provisioning/datasources/all.yml new file mode 100644 index 
0000000..d195798 --- /dev/null +++ b/data/grafana/provisioning/datasources/all.yml @@ -0,0 +1,9 @@ +datasources: +- access: 'prometheus' + editable: true + is_default: true + name: 'LoadSheddingMetrics' + org_id: 1 + type: 'prometheus' + url: 'http://prometheus:9090' + version: 1 \ No newline at end of file diff --git a/data/prometheus/prometheus.yml b/data/prometheus/prometheus.yml new file mode 100644 index 0000000..91a2116 --- /dev/null +++ b/data/prometheus/prometheus.yml @@ -0,0 +1,36 @@ +# my global config +global: + scrape_interval: 15s # By default, scrape targets every 15 seconds. + evaluation_interval: 15s # By default, scrape targets every 15 seconds. + # scrape_timeout is set to the global default (10s). + + # Attach these labels to any time series or alerts when communicating with + # external systems (federation, remote storage, Alertmanager). + external_labels: + monitor: 'codelab-monitor' + +# Load rules once and periodically evaluate them according to the global 'evaluation_interval'. +rule_files: + # - "first.rules" + # - "second.rules" + +# A scrape configuration containing exactly one endpoint to scrape: +# Here it's Prometheus itself. +scrape_configs: + # The job name is added as a label `job=` to any timeseries scraped from this config. + - job_name: 'prometheus' + + # Override the global default and scrape targets from this job every 5 seconds. + scrape_interval: 5s + + # metrics_path defaults to '/metrics' + # scheme defaults to 'http'. + + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'sample-api' + metrics_path: /metrics + scrape_interval: 5s + static_configs: + - targets: ['host.docker.internal:5261'] \ No newline at end of file diff --git a/deploy/docker/Dockerfile b/deploy/docker/Dockerfile new file mode 100644 index 0000000..6b90c7b --- /dev/null +++ b/deploy/docker/Dockerfile @@ -0,0 +1,111 @@ +ARG FROM_REPO=mcr.microsoft.com/dotnet +ARG DOTNET_SDK_IMAGE=sdk:8.0-jammy +ARG DOTNET_RUNTIME_IMAGE=runtime:8.0-jammy +ARG ASPNET_RUNTIME_IMAGE=aspnet:8.0-jammy +ARG RUNTIME_IDENTIFIER=linux-x64 + +## +## sdk-image - Has the .net core SDK required to build and run tests +## +FROM ${FROM_REPO}/${DOTNET_SDK_IMAGE} as sdk-image + +## +## runtime-image - Has the .net core runtime, required to run the service +## +FROM ${FROM_REPO}/${DOTNET_RUNTIME_IMAGE} as runtime-image + +## +## aspnet-runtime-image - Has the asp.net core runtime, required to run the web applications +## +FROM ${FROM_REPO}/${ASPNET_RUNTIME_IMAGE} AS aspnet-runtime-image + +## +## Building +## +FROM sdk-image AS build + +ARG RUNTIME_IDENTIFIER + +RUN mkdir /app +WORKDIR /app + +# 1 - Copy sln and csprojs to make restore faster - Optional +COPY LoadShedding.sln LoadShedding.sln + +# 1.1 Library +COPY src/*/*.csproj ./ +RUN for file in $(ls *.csproj); do mkdir -p src/${file%.*}/ && mv $file src/${file%.*}/; done + +# 1.2 Unit tests +COPY tests/unit-tests/*/*.csproj ./ +RUN for file in $(ls *.csproj); do mkdir -p tests/unit-tests/${file%.*}/ && mv $file tests/unit-tests/${file%.*}/; done + +# 1.3 Integration tests +COPY tests/integration-tests/Farfetch.LoadShedding.IntegrationTests/Farfetch.LoadShedding.IntegrationTests.csproj tests/integration-tests/Farfetch.LoadShedding.IntegrationTests/Farfetch.LoadShedding.IntegrationTests.csproj + +# 1.4 Perfornance tests +COPY tests/performance-tests/Farfetch.LoadShedding.PerformanceTests/Farfetch.LoadShedding.PerformanceTests.csproj tests/performance-tests/Farfetch.LoadShedding.PerformanceTests/Farfetch.LoadShedding.PerformanceTests.csproj + +# 1.5 
- Restore nuget packages +RUN dotnet restore LoadShedding.sln -r ${RUNTIME_IDENTIFIER} + +# 2 - Copy all files +COPY . . + +# 3 - Build +RUN dotnet build --no-restore --configuration Release LoadShedding.sln +RUN mkdir /reports + +# 4 - Publish performance tests sample + +RUN dotnet publish tests/performance-tests/Farfetch.LoadShedding.PerformanceTests/Farfetch.LoadShedding.PerformanceTests.csproj -c Release -o /out --no-restore + +## +## Unit Tests +## +FROM sdk-image AS unit-tests + +COPY --from=build /app /app +COPY --from=build /.nuget /.nuget + +WORKDIR /app + +RUN dotnet test LoadShedding.sln --filter Category=Unit --configuration Release + +# +# Integration Tests +# +FROM sdk-image AS integration-tests +ENV ASPNETCORE_ENVIRONMENT=Docker + +COPY --from=build /app /app +COPY --from=build /.nuget /.nuget + +WORKDIR /app + +CMD ["dotnet", "test", "--no-restore", "tests/integration-tests/Farfetch.LoadShedding.IntegrationTests/Farfetch.LoadShedding.IntegrationTests.csproj", "--logger", "trx", "--results-directory", "/reports"] + +# +# Performance Tests Sample +# +FROM aspnet-runtime-image AS performance-tests-sample +ENV ASPNETCORE_ENVIRONMENT=Docker + +COPY --from=build /.nuget /.nuget +COPY --from=build /out /app + +WORKDIR /app + +ENV ASPNETCORE_URLS=http://+:9025 \ + ASPNETCORE_ENVIRONMENT=Docker + +CMD dotnet Farfetch.LoadShedding.PerformanceTests.dll + +## +## Jmeter Performance Tests +## +FROM justb4/jmeter:5.5 AS performance-tests +COPY --from=build /app/tests/performance-tests/AdaptiveLimiterTests.jmx /app +WORKDIR /app + +CMD ["jmeter", "-Jjmeter.save.saveservice.output_format=xml", "-JServerEndpoint=performance-tests-sample", "-JServerEndpointNoLimiter=performance-tests-sample-no-limiter", "-n", "-t", "AdaptiveLimiterTests.jmx", "-l", "results.jtl"] diff --git a/deploy/docker/Sample.Dockerfile b/deploy/docker/Sample.Dockerfile new file mode 100644 index 0000000..f436bfd --- /dev/null +++ b/deploy/docker/Sample.Dockerfile @@ -0,0 +1,51 @@ +ARG FROM_REPO=mcr.microsoft.com/dotnet +ARG DOTNET_SDK_IMAGE=sdk:8.0-jammy +ARG ASPNET_RUNTIME_IMAGE=aspnet:8.0-jammy +ARG RUNTIME_IDENTIFIER=linux-x64 + +FROM ${FROM_REPO}/${DOTNET_SDK_IMAGE} as sdk-image +FROM ${FROM_REPO}/${ASPNET_RUNTIME_IMAGE} as runtime-image + +## +## Build +## +FROM sdk-image AS build + +ARG RUNTIME_IDENTIFIER + +RUN mkdir /app +WORKDIR /app + +# 1 - Copy projects +COPY samples/Samples.WebApi/Samples.WebApi.csproj samples/Samples.WebApi/ +COPY ./*/*.csproj ./ + +# Restore original file paths +RUN for file in $(ls *.csproj); do mkdir -p ./${file%.*}/ && mv $file ./${file%.*}/ && echo $file; done + +# 1.1 - Restore packages +RUN dotnet restore samples/Samples.WebApi/Samples.WebApi.csproj -r $RUNTIME_IDENTIFIER + +# 2 - Copy all files +COPY . . + +# 3 - Build +RUN dotnet build -c Release samples/Samples.WebApi/Samples.WebApi.csproj + +## +## Publish +## +FROM build AS publish +RUN dotnet publish samples/Samples.WebApi/Samples.WebApi.csproj --no-build -c Release -o /out + +## +## Run +## +FROM runtime-image + +COPY --from=publish /out /out +WORKDIR /out +ENV ASPNETCORE_URLS=http://+:5261 + +COPY --from=publish /out . 
+ENTRYPOINT ["dotnet", "Samples.WebApi.dll"] \ No newline at end of file diff --git a/docker-compose.sample.yaml b/docker-compose.sample.yaml new file mode 100644 index 0000000..882b427 --- /dev/null +++ b/docker-compose.sample.yaml @@ -0,0 +1,44 @@ +version: "3.4" + +services: + samples-webapi: + container_name: loadshedding-samples-webapi + image: loadshedding-samples-webapi + ports: + - "5261:5261" + environment: + - MongoConnectionString=mongodb://mongo:27017/loadshedding + build: + context: . + dockerfile: deploy/docker/Samples.Dockerfile + depends_on: + - mongo + + prometheus: + image: prom/prometheus:v2.42.0 + container_name: loadshedding-prometheus + ports: + - "9090:9090" + volumes: + - ./data/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml + command: + - "--config.file=/etc/prometheus/prometheus.yml" + extra_hosts: + - "host.docker.internal:host-gateway" + + grafana: + image: grafana/grafana:9.3.6 + container_name: loadshedding-grafana + ports: + - "3000:3000" + volumes: + - ./data/grafana/provisioning:/etc/grafana/provisioning + - ./data/grafana/dashboards:/var/lib/grafana/dashboards + depends_on: + - prometheus + + mongo: + image: mongo:4.2.0 + container_name: loadshedding-mongodb + ports: + - "27017:27017" diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 0000000..6017379 --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,44 @@ +version: "3.4" + +services: + unit-tests: + container_name: loadshedding-unit-tests + image: loadshedding-unit-tests + build: + context: . + dockerfile: deploy/docker/Dockerfile + target: unit-tests + + integration-tests: + container_name: loadshedding-integration-tests + image: loadshedding-integration-tests + build: + context: . + dockerfile: deploy/docker/Dockerfile + target: integration-tests + + performance-tests-sample: + container_name: loadshedding-performance-tests-sample + image: loadshedding-performance-tests-sample + build: + context: . + dockerfile: deploy/docker/Dockerfile + target: performance-tests-sample + + performance-tests-sample-no-limiter: + container_name: loadshedding-performance-tests-sample-no-limiter + image: loadshedding-performance-tests-sample-no-limiter + environment: + - UseLoadShedding=false + build: + context: . + dockerfile: deploy/docker/Dockerfile + target: performance-tests-sample + + performance-tests: + container_name: loadshedding-performance-tests + image: loadshedding-performance-tests + build: + context: . + dockerfile: deploy/docker/Dockerfile + target: performance-tests \ No newline at end of file diff --git a/docs/getting_started/installation.md b/docs/getting_started/installation.md new file mode 100644 index 0000000..c60a9c7 --- /dev/null +++ b/docs/getting_started/installation.md @@ -0,0 +1,25 @@ +# LoadShedding Installation + +To start using the LoadShedding library, just install the following package to the Startup Project: + +```bash +dotnet add package Farfetch.LoadShedding.AspNetCore +``` + +## How to Use + +Add the LoadShedding services by calling the `AddLoadShedding` extension. + +```csharp +services.AddLoadShedding(); +``` + +Use the `UseLoadShedding` extension method by extending the `IApplicationBuilder` interface. 
+ +```csharp +app.UseLoadShedding(); +``` + +------ + +Go back to [Documentation Index](/README.md#documentation) diff --git a/docs/getting_started/register_concurrency_adaptative_limiter.md b/docs/getting_started/register_concurrency_adaptative_limiter.md new file mode 100644 index 0000000..da43156 --- /dev/null +++ b/docs/getting_started/register_concurrency_adaptative_limiter.md @@ -0,0 +1,246 @@ +# Register Concurrency Adaptative Limiter + +The concurrency adaptative limiter provides the capability to auto-adjust the accepted traffic based on runtime performance, ensuring that latencies remain low. + +![Concurrency Adaptative Limiter](../resources/concurrency_limiter_graph.png) + +As can be seen in the previous image (adapted from [Performance Under Load Article](https://netflixtechblog.medium.com/performance-under-load-3e6fa9a60581)): + +* requests are processed while the total capacity (concurrency limit + queue slots) has not been reached. +* as soon as the maximum concurrency limit is reached (configurable), incoming requests enter a queue. +* requests waiting in the queue are released in FIFO (First In, First Out) order. +* as soon as the maximum queue size is reached, subsequent requests are automatically rejected with a 503 - Service Unavailable error. +* latency is kept low independently of the number of requests. +* the capacity/concurrency limit is recalculated automatically by an algorithm that takes service performance degradation into account. + +## How to use it + +### Base Configuration + +Register the required services by calling the `IServiceCollection.AddLoadShedding` extension. + +```csharp +services.AddLoadShedding(); +``` + +Then add the middleware to the pipeline with the `UseLoadShedding` extension method on `IApplicationBuilder`. + +```csharp +app.UseLoadShedding(); +``` + +### Options Configuration + +Additional configuration options are available when registering the services. + +```csharp +services.AddLoadShedding((provider, options) => +{ + options.AdaptativeLimiter.ConcurrencyOptions.MinQueueSize = 10; + options.AdaptativeLimiter.UseHeaderPriorityResolver(); + options.SubscribeEvents(events => + { + events.ItemEnqueued.Subscribe(args => Console.WriteLine($"QueueLimit: {args.QueueLimit}, QueueCount: {args.QueueCount}")); + events.ItemDequeued.Subscribe(args => Console.WriteLine($"QueueLimit: {args.QueueLimit}, QueueCount: {args.QueueCount}")); + events.ItemProcessing.Subscribe(args => Console.WriteLine($"ConcurrencyLimit: {args.ConcurrencyLimit}, ConcurrencyItems: {args.ConcurrencyCount}")); + events.ItemProcessed.Subscribe(args => Console.WriteLine($"ConcurrencyLimit: {args.ConcurrencyLimit}, ConcurrencyItems: {args.ConcurrencyCount}")); + events.Rejected.Subscribe(args => Console.Error.WriteLine($"Item rejected with Priority: {args.Priority}")); + }); +}); +``` + +By default, the following `ConcurrencyOptions` values will be used: + +| Option | Description | Default Value | +| ------ | -------- | -------- | +| MinConcurrencyLimit | The minimum number of concurrent requests allowed | 5 | +| InitialConcurrencyLimit | The starting number of concurrent requests allowed. This may be adjusted up or down based on the performance of the system | 5 | +| MaxConcurrencyLimit | The maximum number of concurrent requests allowed | 500 | +| Tolerance | The level of flexibility in adjusting the concurrency limit. It indicates how much change in the minimum latency is acceptable before lowering the concurrency limit threshold. 
A high tolerance means the system can adjust the concurrency limit more freely, while a low tolerance means the limit will be maintained more strictly. For example, a value of 2.0 means a 2x increase in latency is acceptable | 1.5 | +| MinQueueSize | The minimum number of requests that must be waiting in the queue before new requests can be processed | 20 | +| InitialQueueSize | The starting number of requests in the queue | 20 | +| QueueTimeoutInMs | The queue waiting timeout, when the timeout is reached the task will be canceled and will throw an OperationCanceledException. | Infinite | + +**Note:** These default values were defined based on: + +* investigation of the [Netflix Concurrency Limit](https://github.com/Netflix/concurrency-limits) tool. +* having a huge margin of tolerance: accepting 500 requests simultaneously (and 50 more going to the queue - initially). + +On the other hand, if needed, these settings can be completely overridden by using the `ConcurrencyOptions` property: + +```csharp + +services.AddLoadShedding((provider, options) => +{ + options.AdaptativeLimiter.ConcurrencyOptions.MinConcurrencyLimit = 5; + options.AdaptativeLimiter.ConcurrencyOptions.InitialConcurrencyLimit = 5; + options.AdaptativeLimiter.ConcurrencyOptions.InitialQueueSize = 50; + options.AdaptativeLimiter.ConcurrencyOptions.Tolerance = 2; + options.AdaptativeLimiter.ConcurrencyOptions.QueueTimeoutInMs = 60000; +}); +``` + +When defining the options values, the following criteria need to be accomplished: + +* MinConcurrencyLimit, InitialConcurrencyLimit, MaxConcurrencyLimit, MinQueueSize, and MinQueueSize >= 1 +* Tolerance > 1 +* MaxConcurrencyLimit > MinConcurrencyLimit +* InitialConcurrencyLimit >= MinConcurrencyLimit && MaxConcurrencyLimit >= InitialConcurrencyLimit +* InitialQueueSize >= MinQueueSize + +### Events Listener Configuration + +It is possible to monitor the service performance by subscribing internal events: + +* QueueLimitChanged: invoked whenever the queue limit is changed. +* QueueItemsCountChanged: invoked whenever an item is enqueued or dequeued. +* ConcurrencyLimitChanged: invoked whenever the concurrency limit is changed. +* ConcurrentItemsCountChanged: invoked whenever an item is being processed or it is finished. +* ItemEnqueued: invoked whenever a task is enqueued. +* ItemDequeued: invoked whenever a task is dequeued. +* Rejected: invoked whenever there are rejected requests - queue limit is reached. + +```csharp +services.AddLoadShedding((provider, options) => +{ + options.SubscribeEvents(events => + { + events.ItemEnqueued.Subscribe(args => Console.WriteLine($"QueueLimit: {args.QueueLimit}, QueueCount: {args.QueueCount}")); + events.ItemDequeued.Subscribe(args => Console.WriteLine($"QueueLimit: {args.QueueLimit}, QueueCount: {args.QueueCount}")); + events.ItemProcessing.Subscribe(args => Console.WriteLine($"ConcurrencyLimit: {args.ConcurrencyLimit}, ConcurrencyItems: {args.ConcurrencyCount}")); + events.ItemProcessed.Subscribe(args => Console.WriteLine($"ConcurrencyLimit: {args.ConcurrencyLimit}, ConcurrencyItems: {args.ConcurrencyCount}")); + events.Rejected.Subscribe(args => Console.Error.WriteLine($"Item rejected with Priority: {args.Priority}")); + }); +}); +``` + +### Custom Queue Size Calculator Configuration + +Calculating the queue size has the main goal to find the maximum value of requests allowed to be in the queue. + +The default queue size calculator is based on the square root of the concurrency limit value. 
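As a rough illustration of that default square-root strategy, the sketch below shows what such a calculator could look like. It is not the library's actual implementation; the `ConcurrencyLimit` member on `IConcurrencyContext` and the use of `MinQueueSize` as a lower bound are assumptions made only for this example.

```csharp
using System;

// Illustrative sketch only: square-root queue sizing, assuming IConcurrencyContext
// exposes the current ConcurrencyLimit and that MinQueueSize acts as a lower bound.
public class SquareRootQueueSizeCalculatorSketch : IQueueSizeCalculator
{
    private readonly int _minQueueSize;

    public SquareRootQueueSizeCalculatorSketch(int minQueueSize = 20)
    {
        _minQueueSize = minQueueSize;
    }

    public int CalculateQueueSize(IConcurrencyContext context)
    {
        // e.g. a concurrency limit of 100 results in a queue size of 10
        var squareRoot = (int)Math.Ceiling(Math.Sqrt(context.ConcurrencyLimit));

        return Math.Max(squareRoot, _minQueueSize);
    }
}
```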
+ +Optionally, the strategy can be overridden by: + +#### 1 - Implement the IQueueSizeCalculator interface + +```csharp + public class CustomQueueSizeCalculator : IQueueSizeCalculator + { + public int CalculateQueueSize(IConcurrencyContext context) + { + // Implement the Calculate Queue Size logic here + + return default; + } + } +``` + +#### 2 - Use a custom QueueSizeCalculator + +```csharp +services.AddLoadShedding((provider, options) => +{ + options.AdaptativeLimiter.QueueSizeCalculator = new CustomQueueSizeCalculator(); +}); +``` + +### Request Prioritization Configuration + +It is possible to configure the settings to establish priority resolvers for requests. + +At present, only one strategy is supported, which means that solely the most recently configured strategy will be implemented. + +#### Http Header Priority Resolver + +With the extension `UseHeaderPriorityResolver` it will automatically convert the value of the HTTP Header `X-Priority` to the request priority. + +The allowed values are: critical, normal and non-critical + +```csharp +services.AddLoadShedding((provider, options) => +{ + options.AdaptativeLimiter.UseHeaderPriorityResolver(); +}); +``` + +#### Endpoint Priority Resolver + +With the extension `UseEndpointPriorityResolver` it will automatically load the Priority defined for the endpoint from the `EndpointPriorityAttribute`. + +```csharp +services.AddLoadShedding((provider, options) => +{ + options.AdaptativeLimiter.UseEndpointPriorityResolver(); +}); +``` + +Also, add `EndpointPriorityAttribute` in the action. + +```csharp +[HttpGet] +[Route("people")] +[EndpointPriority(Priority.Critical)] +public async Task GetPeopleAsync() +{ + return this.Ok(new[] + { + new Person + { + Id = 1, + Age = 18, + UserName = "john.doe" + } + }); +} +``` + +### Including Metrics + +The library has the option to export adaptative limiter metrics to Prometheus. + +#### Install Package + +```bash +dotnet add package Farfetch.LoadShedding.Prometheus +``` + +#### Configure + +Use the `LoadSheddingOptions` extension method `AddMetrics()`. +The metrics includes the label `method` that describes the HTTP method. For this value to be correctly parsed, the `HTTPContextAccessor` should be included otherwise the `method` label will output the value `UNKNOWN`. + +```csharp +builder.Services.AddHttpContextAccessor(); + +services.AddLoadShedding((provider, options) => +{ + options.AddMetrics(); +}); +``` + +`AddMetrics` has additional options that supports renaming and enable/disable specific metrics. 
+ +```csharp +options.AddMetrics(options => +{ + options.QueueLimit.Enabled = false; + options.ConcurrencyLimit.Enabled = false; + options.RequestRejected.Enabled = false; +}); +``` + +#### Reference Documentation + +| Metric Name | Metric Description | Metric Type | Labels | +| ----------- | ------------------ | ----------- | ------ | +| http_requests_concurrency_items_total | The current number of executions concurrently | gauge | | +| http_requests_concurrency_limit_total | The current concurrency limit | gauge | | +| http_requests_queue_items_total | The current number of items waiting to be processed in the queue | gauge | method (HTTP method of the request), priority (critical, noncritical, normal) | +| http_requests_queue_limit_total | The current queue limit size | gauge | | +| http_requests_queue_time_seconds | The time each request spent in the queue until its executed | histogram | method (HTTP method of the request), priority (critical, noncritical, normal) | +| http_requests_rejected_total | The number of requests rejected because the queue limit is reached | counter | method (HTTP method of the request), priority (critical, noncritical, normal), reason (max_queue_items, queue_timeout) | + +------ + +Go back to [Documentation Index](/README.md#documentation) diff --git a/docs/resources/concurrency_limiter_graph.png b/docs/resources/concurrency_limiter_graph.png new file mode 100644 index 0000000..1f2374c Binary files /dev/null and b/docs/resources/concurrency_limiter_graph.png differ diff --git a/rfcs/0000-rfc-template.md b/rfcs/0000-rfc-template.md new file mode 100644 index 0000000..08d36e1 --- /dev/null +++ b/rfcs/0000-rfc-template.md @@ -0,0 +1,99 @@ +--- +- Feature Name: (fill me in with a unique ident, `my_feature_name`) +- Start Date: (fill me in with today's date, YYYY-MM-DD) +- RFC MR(s): [loadshedding/rfcs#0](https://github.com/Farfetch/loadshedding/pull/0) +- Issue(s): [loadshedding/rfcs#0](https://github.com/Farfetch/loadshedding/issues/0) +--- + +# Summary +[summary]: #summary + +One paragraph explanation of the feature. + +# Motivation +[motivation]: #motivation + +Why are we doing this? What use cases does it support? What is the expected outcome? + +# Guide-level explanation +[guide-level-explanation]: #guide-level-explanation + +Explain the proposal as if it was already included in the language and you were teaching it to another Rust programmer. That generally means: + +- Introducing new named concepts. +- Explaining the feature largely in terms of examples. +- Explaining how Rust programmers should *think* about the feature, and how it should impact the way they use Rust. It should explain the impact as concretely as possible. +- If applicable, provide sample error messages, deprecation warnings, or migration guidance. +- If applicable, describe the differences between teaching this to existing Rust programmers and new Rust programmers. +- Discuss how this impacts the ability to read, understand, and maintain Rust code. Code is read and modified far more often than written; will the proposed feature make code easier to maintain? + +For implementation-oriented RFCs (e.g. for compiler internals), this section should focus on how compiler contributors should think about the change, and give examples of its concrete impact. For policy RFCs, this section should provide an example-driven introduction to the policy, and explain its impact in concrete terms. 
+ +# Reference-level explanation +[reference-level-explanation]: #reference-level-explanation + +This is the technical portion of the RFC. Explain the design in sufficient detail that: + +- Its interaction with other features is clear. +- It is reasonably clear how the feature would be implemented. +- Corner cases are dissected by example. + +The section should return to the examples given in the previous section, and explain more fully how the detailed proposal makes those examples work. + +# Drawbacks +[drawbacks]: #drawbacks + +Why should we *not* do this? + +# Rationale and alternatives +[rationale-and-alternatives]: #rationale-and-alternatives + +- Why is this design the best in the space of possible designs? +- What other designs have been considered and what is the rationale for not choosing them? +- What is the impact of not doing this? +- If this is a language proposal, could this be done in a library or macro instead? Does the proposed change make Rust code easier or harder to read, understand, and maintain? + +# Prior art +[prior-art]: #prior-art + +Discuss prior art, both the good and the bad, in relation to this proposal. +A few examples of what this can include are: + +- For language, library, cargo, tools, and compiler proposals: Does this feature exist in other programming languages and what experience have their community had? +- For community proposals: Is this done by some other community and what were their experiences with it? +- For other teams: What lessons can we learn from what other communities have done here? +- Papers: Are there any published papers or great posts that discuss this? If you have some relevant papers to refer to, this can serve as a more detailed theoretical background. + +This section is intended to encourage you as an author to think about the lessons from other languages, provide readers of your RFC with a fuller picture. +If there is no prior art, that is fine - your ideas are interesting to us whether they are brand new or if it is an adaptation from other languages. + +Note that while precedent set by other languages is some motivation, it does not on its own motivate an RFC. +Please also take into consideration that rust sometimes intentionally diverges from common language features. + +# Unresolved questions +[unresolved-questions]: #unresolved-questions + +- What parts of the design do you expect to resolve through the RFC process before this gets merged? +- What parts of the design do you expect to resolve through the implementation of this feature before stabilization? +- What related issues do you consider out of scope for this RFC that could be addressed in the future independently of the solution that comes out of this RFC? + +# Future possibilities +[future-possibilities]: #future-possibilities + +Think about what the natural extension and evolution of your proposal would +be and how it would affect the language and project as a whole in a holistic +way. Try to use this section as a tool to more fully consider all possible +interactions with the project and language in your proposal. +Also consider how this all fits into the roadmap for the project +and of the relevant sub-team. + +This is also a good place to "dump ideas", if they are out of scope for the +RFC you are writing but otherwise related. + +If you have tried and cannot think of any future possibilities, +you may simply state that you cannot think of anything. 
+ +Note that having something written down in the future-possibilities section +is not a reason to accept the current or a future RFC; such notes should be +in the section on motivation or rationale in this or subsequent RFCs. +The section merely provides additional information. \ No newline at end of file diff --git a/rfcs/0001-APPROVED-adaptative-concurrency-limit.md b/rfcs/0001-APPROVED-adaptative-concurrency-limit.md new file mode 100644 index 0000000..25921df --- /dev/null +++ b/rfcs/0001-APPROVED-adaptative-concurrency-limit.md @@ -0,0 +1,255 @@ +--- +- Feature Name: adaptative-concurrency-limit +- Start Date: 2022-05-11 +--- + +# Summary +[summary]: #summary + +The goal of this RFC is to have a solution to make services more resilient. By enabling the capability to auto-adjust the accepted traffic based on the runtime performance, therefore decreasing the possibility of downtime and outages. + +# Motivation +[motivation]: #motivation + +Currently, there are several ways to control the concurrency of a service: Kong plugins, load balancer configurations, and concurrency limit frameworks. + +But in those options, the limits are configurated through settings with static values, and every time rises the same question: How many requests can my service handle simultaneously? + +This occurs because it is very difficult to determine the service capacity since it can change in runtime and it depends on several factors: + +* The resource limits (Memory, CPU, Threads) +* The limit of each service dependency (Http Service, Database, Cache) +* The Network latency + +The concurrency limit of a system can be measured using Little’s law, which describes the service concurrency limit as the product of the throughput x average latency, and the excess of requests should be enqueued or rejected. + +It is possible to measure the service metrics in runtime. It enables the possibility of calculating and dynamically changing the limits on the fly to protect the services from outages. + +# Guide Implementation +[guide-level-explanation]: #guide-level-explanation + +The idea is to adapt the concurrency limit according to the service performance in runtime based on the average time spent executing the request. + +## Time Tracking + +First of all, it is important to collect the data that will be used to determine the performance degradation, each request, the round trip time should be tracked and accumulated, the number of requests should be increased and the MinRTT (Minimum Round Trip Time) could be updated if the duration of the request is less the current MinRTT. + +```csharp +totalTime += durationInMs; +numberOfExecutions++; +avgRTT = totalTime / numberOfExecutions; + +if (durationInMs < minRTT) + minRTT = durationInMs; +``` + +## Finding the Concurrency Limit + +It is possible to determine the limit using the TCP congestion control algorithm and to change the limit with the increase of the RTT (Round trip time). + +`NewConcurrencyLimit = (MinRTT * Tolerance / AvgRTT) * CurrentLimit + QueueSize` + +The limit should start at a small initial point and it will increase while the formula returns a positive value until finding a stable point. + +When there is a load in the system the average RTT will increase and the concurrency limit decreases to adapt and control the response times. + +![Concurrent Requests Limit](./resources/0001-concurrent-requests-limit.png) + +## How to find the max queue size? 
+ +### Square Root queue size + +In this mode the queue size will be always the square root of the concurrency limit, It is the logic used by the Netflix concurrency limit because the square root is a small value that increases when the concurrency limit increases. + +This is a more rigid strategy and tends to reject more requests to control better the response times. + +### RPS Capacity + +This strategy uses the average RTT to calculate the number of samples accepted by second for each concurrency item accepted. + +`queueSize = (1000 / avgRTT) * ConcurrencyLimit` + +When the average time or the concurrency limit changes the queue size increases or decreases to maintain the average RTT controlled. + +## When the max capacity is reached + +If the concurrency limit is reached and all queue slot is in use the requests will be rejected using for that the status code [503 (ServiceUnavailable)](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/503). + +## Partitions + +There are some scenarios when the limits can change according to the context like HTTP methods, endpoint priority, or relevance. + +To cover this scenario should be provided a way to configure the partition name per context, If it is not configurated it should use the global partition. + +## Request Prioritization + +The RFC related to the request prioritization feature is available [here](/rfcs/0003-APPROVED-request-prioritization.md). + +## Metrics + +To monitor the service performance and to support the auto-scaling in the case of overload a set of metrics should be provided by the concurrency limiter. + +### http_requests_queue_time_seconds + +This metric is a histogram that represents the time each request spent in the queue until its executed. + +| Label | Description | +| ----- | ----------- | +| method | The HTTP method of the request | +| priority | Priority of the rejected request (critical, normal, noncritical) | + +### http_requests_concurrency_limit_total + +This metric is a gauge that represents the current concurrency limit. + +### http_requests_concurrency_items_total + +This metric is a gauge that represents the current number of execution in concurrency. + +### http_requests_queue_limit_total + +This metric is a gauge that represents the current queue size. + +### http_requests_queue_items_total + +This metric is a gauge that represents the items waiting to be processed in the queue. + +| Label | Description | +| ----- | ----------- | +| method | The HTTP method of the request | +| priority | Priority of the rejected request (critical, normal, noncritical) | + +### http_requests_rejected_total + +This metric is a counter that represents the number of requests rejected because the queue limit is reached. + +| Label | Description | +| ----- | ----------- | +| method | The HTTP method of the request | +| priority | Priority of the rejected request (critical, normal, noncritical) | + +Below there is an example of a visualization of the metrics, it is possible to view when the limits are reached and the rejected items. + +![Concurrency Usage Metrics](./resources/0001-concurrency-usage-metrics.png) + +## Auto Scaling + +Using KEDA it is possible to use Prometheus metrics to trigger the creation of PODs and auto-scaling the application when there is an overload in the service. + +The http_requests_queue_items_total can help to auto-scaling the service, once the service has items in the queue it can trigger the scale-up of the application. 
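To make the limit formula and the two queue sizing strategies above concrete, here is a small, self-contained calculation with arbitrary sample values. It only illustrates the arithmetic and is not the library's actual implementation.

```csharp
using System;

// Sample values, chosen only for illustration.
double minRtt = 20;       // minimum observed round trip time (ms)
double avgRtt = 50;       // current average round trip time (ms)
double tolerance = 1.5;   // accepted latency degradation factor
int currentLimit = 100;   // current concurrency limit

// Square Root strategy: queueSize = sqrt(concurrencyLimit)
int queueSize = (int)Math.Sqrt(currentLimit); // 10

// NewConcurrencyLimit = (MinRTT * Tolerance / AvgRTT) * CurrentLimit + QueueSize
int newLimit = (int)(minRtt * tolerance / avgRtt * currentLimit) + queueSize; // 0.6 * 100 + 10 = 70

// RPS Capacity strategy: queueSize = (1000 / avgRTT) * ConcurrencyLimit
int rpsQueueSize = (int)(1000 / avgRtt * newLimit); // 20 * 70 = 1400

Console.WriteLine($"New limit: {newLimit}, square-root queue: {queueSize}, RPS queue: {rpsQueueSize}");
```

With these numbers the average latency has degraded beyond the tolerated 1.5x of the minimum, so the limit shrinks from 100 to 70; if the average RTT were at or below `minRtt * tolerance`, the same formula would grow the limit instead.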
+ +# Reference Implementation +[reference-level-explanation]: #reference-level-explanation + +The proposal is to create a specific package for dealing with concurrency limiting that encapsulates all the logic to calculate the limit and also gives support for other types of settings like memory limit, CPU usage limit, and max allowed response times. + +## Core Implementation + +The idea is to create a core framework that implements a concurrency limiting in any desired code block, it will provide a builder to help create the limiter through parameters. + +The limiter instance will provide a method Execute where the function to execute will be placed. + +```csharp +var limiter = new ConcurrencyLimiterBuilder() + .WithOptions(options => + { + options.MinConcurrencyLimit = minLimit; + options.MaxConcurrencyLimit = maxLimit; + options.MinQueueSize = minQueueLimit; + }) + .WithSquareRootQueueSizeStrategy() + .Build(); + +await limiter.ExecuteAsync(() => next()); +``` + +So, with the core implementation, it will be possible to control the concurrency limit of any method or function. + +## AspNet Core Extension + +A new separated package will be created to implement the concurrency limit in middlewares and controller actions. + +### Middleware Extension + +An extension will be created to include the concurrency limiter in the request execution pipeline. + +```csharp + app.UseConcurrencyLimiter((provider, builder) => + { + builder + .WithOptions(options => + { + options.MinConcurrencyLimit = 20; + options.InitialConcurrencyLimit = 50; + options.InitialQueueSize = 50; + }) + .WithPartition(context => context.Request.Path) + .WithSquareRootQueueSizeStrategy() + .WithLogger(provider.GetRequiredService>()) + .WithMetricsListener(listener => + { + listener.OnChanged = metrics => + { + metricsHandler.MetricsChanged(metrics); + }; + + listener.OnReject = () => + { + metricsHandler.Reject(); + }; + }); + }); +``` + +### Controller Action Attributes + +Also, an attribute will be provided for the case of limit a specific endpoint. + +```csharp + [HttpGet] + [ConcurrencyLimiter(minLimit: 20, maxLimit: 2000, minQueueLimit: 20)] + public async Task> Get() + { + return await collection.Find(FilterDefinition.Empty).ToListAsync(); + } +``` + +## Open Source Extension + +Another solution is to contribute to the community and to create an open-source extension to [Microsoft Rate Limiting implementation](https://github.com/dotnet/aspnetcore/tree/main/src/Middleware/RateLimiting). + +```csharp +var options = new RateLimiterOptions(); +options.DefaultRejectionStatusCode = 503; +options.Limiter = new PartitionedAdaptativeConcurrencyLimiter(limitOptions => +{ + limitOptions.MinConcurrencyLimit = minLimit; + limitOptions.MaxConcurrencyLimit = maxLimit; + limitOptions.MinQueueSize = minQueueLimit; +}); + +app.UseRateLimiter(options); +``` + +# Drawbacks +[drawbacks]: #drawbacks + +It is a solution that needs to be maintained internally by a team even if it is open source. + +On the other hand, turning it open-source will enable the contribution of the dotnet community. + +# Rationale / Alternatives +[rationale-and-alternatives]: #rationale-and-alternatives + +The rationale for creating a dotnet library is to assist, and have and easy and simple way of having these techniques in services. + +Another alternative is to create load balancing or gateway plugins with this logic, it could be used in a layer above and it could be reused by any technology. 
+ +# Learn More/References: + +* [Netflix - Performance Under Load](https://netflixtechblog.medium.com/performance-under-load-3e6fa9a60581) +* [Netflix Concurrency Limiter GitHub Repository](https://github.com/Netflix/concurrency-limits) +* [Vector Request Limiter](https://github.com/vectordotdev/vector/blob/master/rfcs/2020-04-06-1858-automatically-adjust-request-limits.md) +* [AIMD - TCP Congestion Control](https://en.wikipedia.org/wiki/Additive_increase/multiplicative_decrease) +* [RFC - Request Prioritization](/rfcs/0003-APPROVED-request-prioritization.md) +* [Load Shedding To Avoid Overload](https://aws.amazon.com/builders-library/using-load-shedding-to-avoid-overload/) \ No newline at end of file diff --git a/rfcs/0002-APPROVED-loadshedding-design.md b/rfcs/0002-APPROVED-loadshedding-design.md new file mode 100644 index 0000000..0affbdc --- /dev/null +++ b/rfcs/0002-APPROVED-loadshedding-design.md @@ -0,0 +1,68 @@ +--- +- Feature Name: loadshedding-design +- Start Date: 2022-12-12 +--- + +# Summary +[summary]: #summary + +LoadShedding is a set of techniques used by high performance web services to detect when there is a traffic congestion and identify the requests that should be served to prevent a service overload and cascading failures. + +# Motivation +[motivation]: #motivation + +Given the high-performance scenarios existent, where there are a lot of very important services in the critical flow, it is important to ensure resilience and avoid outages by detecting when there is an overload on the services and applying mechanisms to answer the maximum of requests without decrease the service quality, and also scale and redistribute the requests when the load is increasing. + +For that, the LoadShedding library was created to serve as an observer the internal service performance, detect the service limit from inside and reject the requests when the limit is reached. + +# Guide Implementation +[guide-level-explanation]: #guide-level-explanation + +The idea is to create a layer to encapsulate the request and track the service performance, machine resource usage and reject when it's not possible to receive the request. + +## How to Configure + +[configuration]: #configuration + +An extension will be provided to add a middleware to perform the logic to track, detect and control the request traffic. + +```csharp + app.UseLoadShedding((provider, builder) => + { + builder + .UseCPUUsageLimit(99) + .UseMemoryUsageLimit(99) + .UseAdaptativeLimiter(options => + { + options.PartitionResolver = (context) => context.Request.Path; + options.QueueSizeStrategy = QueueSizeStrategies.SquareRoot; + options.MinConcurrencyLimit = {{serviceSettings.Resilience.MaxInFlight.AdaptativeConcurrencyLimits.MinConcurrencyLimit}}; + options.MaxConcurrencyLimit = {{serviceSettings.Resilience.MaxInFlight.AdaptativeConcurrencyLimits.MaxConcurrencyLimit}}; + }) + .WithMetricsListener(listener => + { + listener.OnChanged = metrics => + { + metricsHandler.MetricsChanged(metrics); + }; + + listener.OnReject = () => + { + metricsHandler.Reject(); + }; + }); + }); +``` + +# Drawbacks +[drawbacks]: #drawbacks + +It is a solution that needs to be maintained internally by a team even if it is open source. +On the other hand, making it open source will enable the contribution of the dotnet community. + +# Rationale / Alternatives +[rationale-and-alternatives]: #rationale-and-alternatives + +The rationale for creating a dotnet library is to assist, and have and easy and simple way of having these techniques in services. 
+ +Another alternative is to create load balancing or gateway plugins with this logic, it could be used in a layer above and it could be reused by any technology. \ No newline at end of file diff --git a/rfcs/0003-APPROVED-request-prioritization.md b/rfcs/0003-APPROVED-request-prioritization.md new file mode 100644 index 0000000..faac479 --- /dev/null +++ b/rfcs/0003-APPROVED-request-prioritization.md @@ -0,0 +1,191 @@ +--- +- Feature Name: request-prioritization +- Start Date: 2023-01-30 +--- + +# Summary +[summary]: #summary + +Since there is already a mechanism to automatically detect the service limits based on internal behavior and excessive requests are discarded, it becomes necessary to stipulate different priorities per request, with the objective of prioritizing more important requests or critical flows. + +# Motivation +[motivation]: #motivation + +Every service has a finite capacity to handle requests, these limitations are related to several factors such as: performance of dependencies, available resources (CPU and Memory). + +Once the service reaches its maximum capacity, it is important to have a way of defining which are the priority tasks (requests), so that it is possible to direct the available resources to what is most important. + +Thus, we can guarantee that in overload scenarios the most critical parts of the service will be less affected by requests of lower importance. + +# Guide Implementation +[guide-level-explanation]: #guide-level-explanation + +Once the concurrency limit is reached, the requests are added to a queue in an ordered way, regardless of their priority, until the maximum capacity of the queue is reached. + +The purpose of this RFC is to change this structure so that it is possible to categorize incoming requests by priority and to order their execution. + +## Queue Structure + +The idea is to create a structure with multiple chained queues so that the last item of a queue references the first item of the next lower priority queue, below there is an image that demonstrates this structure. + +![Queue Structure](./resources/0003-queue-format.drawio.png) + +## Queue Flow + +When a request arrives, the algorithm checks if the priority of the new request is greater than the item with the lowest priority, if so, the new request is added to the queue related to its priority and the last item is discarded, below there is an example of the execution flow. + +![Queue Substitution Flow](./resources/0003-queue-flow.drawio.png) + +**Note**: The flow above is only applied when the service is overloaded. + +## Prioritization Definition + +There are several strategies for defining request priorities, these strategies can be adopted case-by-case depending on the business rules related to each service. + +The accepted priority values are: + +- **non-critical**: The request doesn't affect directly the end user. i.e: Background jobs, Kafka consumer handlers. +- **normal**: The request may affect normal business operations, but it is possible to manage it by retrigging later or having a degraded experience for the end user. +- **critical**: The request is critical and if not fulfilled it will have a high impact on the business. i.e: prevent a sale, show an error to the user. + +### Endpoint Priority + +In this case, each endpoint has its internal priority defined by code. + +```csharp +[HttpGet] +[Route("WeatherForecast")] +[EndpointPriority(EndpointPriorities.Critical)] +public async Task> GetAll() +{ + ... 
+} +``` + +When the priority is defined in the endpoint level it overcomes the other strategies. + +### HTTP Header Priority + +An HTTP header (X-Priority) is sent defining the priority of the request. + +```bash +curl http://localhost:9999/WeatherForecast -H "X-Priority: critical" +``` + +In the LoadShedding library configuration the `WithHeaderPrioritizationStrategy` strategy should be configured. + +```csharp +app.UseLoadShedding((provider, configurator) => +{ + configurator.UseAdaptativeLimiter((builder) => + { + builder + .WithHeaderPrioritizationStrategy("X-Priority") + .WithOptions(options => + { + ... + }); + }); +}); +``` + +### User Claim Priority (Out of MVP) + +A specific user (token) may have a higher priority than others, in this case the priority is loaded from the user claims and the priority is applied. + +i.e: tenant_id, plan tier, client_id, etc. + +In the LoadShedding library configuration the `WithUserClaimPrioritizationStrategy` strategy should be configured. + +```csharp +app.UseLoadShedding((provider, configurator) => +{ + configurator.UseAdaptativeLimiter((builder) => + { + builder + .WithUserClaimPrioritizationStrategy("tenant_id") + .WithOptions(options => + { + ... + }); + }); +}); +``` + +### Custom Priority + +A factory is provided to allow the developers to define a function with a custom logic to resolve the request priority. + +```csharp +app.UseLoadShedding((provider, configurator) => +{ + configurator.UseAdaptativeLimiter((builder) => + { + builder + .WithPriorityResolver(context => + { + // Any custom logic + if (context.Request.Headers.TryGetValue("X-Priority", out var values)) + { + return Convert.ToInt32(values.First()); + } + + return 0; + }) + .WithOptions(options => + { + ... + }); + }); +}); +``` + +## Metrics + +There is already a metric to collect the number of enqueued items and the idea is to add a new label priority. + +### http_requests_queue_total + +**Type**: Gauge + +**Description:** The total number of requests currently present in the request queue. + +| Label | Description | +| --------- | ----------- | +| method | (GET, PUT, POST, DELETE, HEAD, OPTIONS) | +| uri | The route used to serve the request, UNKNOWN if the route is not known | +| **priority** | (noncritical, normal, critical) | + +### http_requests_rejected_total + +**Type**: Counter + +**Description:** The total number of requests rejected, requests not processed, and not on the request queue. + +| Label | Description | +| --------- | ----------- | +| method | (GET, PUT, POST, DELETE, HEAD, OPTIONS) | +| uri | The route used to serve the request, UNKNOWN if the route is not known | +| reason | MaxConcurrentRequests, MaxCpuUsage, MaxMemoryUsage | +| **priority** | (noncritical, normal, critical) | + +### http_requests_queue_time_seconds + +**Type**: Histogram + +**Description:** Provides the time waiting in the queue. + +| Label | Description | +| --------- | ----------- | +| method | (GET, PUT, POST, DELETE, HEAD, OPTIONS) | +| uri | The route used to serve the request, UNKNOWN if the route is not known | +| **priority** | (noncritical, normal, critical) | + +### Health Checks and Metrics Exportation + +This solution adds an middleware in the request execution pipeline and the health check and metrics middlewares should be placed before the LoadShedding middleware to avoid reject those requests. 
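A minimal ordering sketch for that note, assuming the standard ASP.NET Core health checks middleware and the prometheus-net `UseMetricServer` extension already used by the sample; the `/health` endpoint is illustrative.

```csharp
using Farfetch.LoadShedding;
using Prometheus;

var builder = WebApplication.CreateBuilder(args);

builder.Services.AddControllers();
builder.Services.AddHealthChecks();
builder.Services.AddHttpContextAccessor();
builder.Services.AddLoadShedding();

var app = builder.Build();

// Health probes and metrics scrapes are mapped first so they are never
// queued or rejected by the load shedder.
app.UseHealthChecks("/health");
app.UseMetricServer();

// LoadShedding comes afterwards and only guards the remaining pipeline.
app.UseLoadShedding();

app.MapControllers();
app.Run();
```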
+
+# Drawbacks
+[drawbacks]: #drawbacks
+
+When the service is overloaded, this solution tends to increase the latency of low-priority requests, since they are pushed further down the queue whenever a higher-priority request arrives.
\ No newline at end of file
diff --git a/rfcs/resources/0001-concurrency-usage-metrics.png b/rfcs/resources/0001-concurrency-usage-metrics.png
new file mode 100644
index 0000000..11727e1
Binary files /dev/null and b/rfcs/resources/0001-concurrency-usage-metrics.png differ
diff --git a/rfcs/resources/0001-concurrent-requests-limit.png b/rfcs/resources/0001-concurrent-requests-limit.png
new file mode 100644
index 0000000..1b902ae
Binary files /dev/null and b/rfcs/resources/0001-concurrent-requests-limit.png differ
diff --git a/rfcs/resources/0003-queue-flow.drawio.png b/rfcs/resources/0003-queue-flow.drawio.png
new file mode 100644
index 0000000..cfc1e15
Binary files /dev/null and b/rfcs/resources/0003-queue-flow.drawio.png differ
diff --git a/rfcs/resources/0003-queue-format.drawio.png b/rfcs/resources/0003-queue-format.drawio.png
new file mode 100644
index 0000000..480c2ce
Binary files /dev/null and b/rfcs/resources/0003-queue-format.drawio.png differ
diff --git a/samples/Samples.WebApi/Controllers/WeatherForecastController.cs b/samples/Samples.WebApi/Controllers/WeatherForecastController.cs
new file mode 100644
index 0000000..14de366
--- /dev/null
+++ b/samples/Samples.WebApi/Controllers/WeatherForecastController.cs
@@ -0,0 +1,27 @@
+using Farfetch.LoadShedding.AspNetCore.Attributes;
+using Microsoft.AspNetCore.Mvc;
+using MongoDB.Driver;
+
+namespace Samples.WebApi.Controllers
+{
+    [ApiController]
+    [Route("[controller]")]
+    public class WeatherForecastController : ControllerBase
+    {
+        private readonly IMongoCollection<WeatherForecast> _collection;
+
+        public WeatherForecastController(IMongoCollection<WeatherForecast> collection)
+        {
+            this._collection = collection;
+        }
+
+        [HttpGet(Name = "GetWeatherForecast")]
+        [EndpointPriority(Farfetch.LoadShedding.Tasks.Priority.Critical)]
+        public async Task<IEnumerable<WeatherForecast>> GetAsync()
+        {
+            return await _collection
+                .Find(FilterDefinition<WeatherForecast>.Empty)
+                .ToListAsync();
+        }
+    }
+}
diff --git a/samples/Samples.WebApi/Program.cs b/samples/Samples.WebApi/Program.cs
new file mode 100644
index 0000000..f815dc2
--- /dev/null
+++ b/samples/Samples.WebApi/Program.cs
@@ -0,0 +1,90 @@
+using Farfetch.LoadShedding;
+using MongoDB.Driver;
+using Prometheus;
+using Samples.WebApi;
+
+var builder = WebApplication.CreateBuilder(args);
+
+builder.Configuration.AddEnvironmentVariables();
+
+// Add services to the container.
+ +builder.Services.AddHttpContextAccessor(); +builder.Services.AddControllers(); + +builder.Services.AddLoadShedding((provider, options) => +{ + options.AdaptativeLimiter.ConcurrencyOptions.MinQueueSize = 10; + options.AdaptativeLimiter.UseHeaderPriorityResolver(); + options.AddMetrics(); + + options.SubscribeEvents(events => + { + events.ItemEnqueued.Subscribe(args => Console.WriteLine($"QueueLimit: {args.QueueLimit}, QueueCount: {args.QueueCount}")); + events.ItemDequeued.Subscribe(args => Console.WriteLine($"QueueLimit: {args.QueueLimit}, QueueCount: {args.QueueCount}")); + events.ItemProcessing.Subscribe(args => Console.WriteLine($"ConcurrencyLimit: {args.ConcurrencyLimit}, ConcurrencyItems: {args.ConcurrencyCount}")); + events.ItemProcessed.Subscribe(args => Console.WriteLine($"ConcurrencyLimit: {args.ConcurrencyLimit}, ConcurrencyItems: {args.ConcurrencyCount}")); + events.Rejected.Subscribe(args => Console.Error.WriteLine($"Item rejected with Priority: {args.Priority}")); + }); +}); + +// Learn more about configuring Swagger/OpenAPI at https://aka.ms/aspnetcore/swashbuckle +builder.Services.AddEndpointsApiExplorer(); +builder.Services.AddSwaggerGen(); + + +builder.Services.AddSingleton(_ => MongoUrl.Create(builder.Configuration["MongoConnectionString"])); + +builder.Services.AddSingleton(provider => new MongoClient(provider.GetRequiredService())); +builder.Services.AddSingleton(provider => +{ + var url = provider.GetRequiredService(); + var client = provider.GetRequiredService(); + return client.GetDatabase(url.DatabaseName); +}); + +builder.Services.AddSingleton>(provider => provider + .GetRequiredService() + .GetCollection(nameof(WeatherForecast))); + +var app = builder.Build(); + +app.UseMetricServer(); + +// Configure the HTTP request pipeline. 
+if (app.Environment.IsDevelopment()) +{ + app.UseSwagger(); + app.UseSwaggerUI(); +} + +app.UseLoadShedding(); + +app.MapControllers(); + +app.Lifetime.ApplicationStarted.Register(() => +{ + var collection = app.Services.GetRequiredService>(); + + if (collection.Find(FilterDefinition.Empty).Any()) + { + return; + } + + var summaries = new[] + { + "Freezing", "Bracing", "Chilly", "Cool", "Mild", "Warm", "Balmy", "Hot", "Sweltering", "Scorching" + }; + + var forecasts = Enumerable.Range(1, 5).Select(index => new WeatherForecast + { + Id = Guid.NewGuid(), + Date = DateTime.Now.AddDays(index), + TemperatureC = Random.Shared.Next(-20, 55), + Summary = summaries[Random.Shared.Next(summaries.Length)] + }); + + collection.InsertMany(forecasts); +}); + +app.Run(); diff --git a/samples/Samples.WebApi/Properties/launchSettings.json b/samples/Samples.WebApi/Properties/launchSettings.json new file mode 100644 index 0000000..5f02722 --- /dev/null +++ b/samples/Samples.WebApi/Properties/launchSettings.json @@ -0,0 +1,15 @@ +{ + "$schema": "https://json.schemastore.org/launchsettings.json", + "profiles": { + "Samples.WebApi": { + "commandName": "Project", + "dotnetRunMessages": true, + "launchBrowser": true, + "launchUrl": "swagger", + "applicationUrl": "http://localhost:5261", + "environmentVariables": { + "ASPNETCORE_ENVIRONMENT": "Development" + } + }, + } +} diff --git a/samples/Samples.WebApi/Samples.WebApi.csproj b/samples/Samples.WebApi/Samples.WebApi.csproj new file mode 100644 index 0000000..e94fb92 --- /dev/null +++ b/samples/Samples.WebApi/Samples.WebApi.csproj @@ -0,0 +1,20 @@ + + + + net8.0 + enable + enable + + + + + + + + + + + + + + diff --git a/samples/Samples.WebApi/WeatherForecast.cs b/samples/Samples.WebApi/WeatherForecast.cs new file mode 100644 index 0000000..fc707e5 --- /dev/null +++ b/samples/Samples.WebApi/WeatherForecast.cs @@ -0,0 +1,15 @@ +namespace Samples.WebApi +{ + public class WeatherForecast + { + public Guid Id { get; set; } + + public DateTime Date { get; set; } + + public int TemperatureC { get; set; } + + public int TemperatureF => 32 + (int)(TemperatureC / 0.5556); + + public string? 
Summary { get; set; } + } +} \ No newline at end of file diff --git a/samples/Samples.WebApi/appsettings.json b/samples/Samples.WebApi/appsettings.json new file mode 100644 index 0000000..2d2aad3 --- /dev/null +++ b/samples/Samples.WebApi/appsettings.json @@ -0,0 +1,10 @@ +{ + "Logging": { + "LogLevel": { + "Default": "Information", + "Microsoft.AspNetCore": "Warning" + } + }, + "AllowedHosts": "*", + "MongoConnectionString": "mongodb://localhost:27017/loadshedding" +} diff --git a/src/Farfetch.LoadShedding.AspNetCore/Attributes/EndpointPriorityAttribute.cs b/src/Farfetch.LoadShedding.AspNetCore/Attributes/EndpointPriorityAttribute.cs new file mode 100644 index 0000000..d1268b4 --- /dev/null +++ b/src/Farfetch.LoadShedding.AspNetCore/Attributes/EndpointPriorityAttribute.cs @@ -0,0 +1,15 @@ +using System; +using Farfetch.LoadShedding.Tasks; + +namespace Farfetch.LoadShedding.AspNetCore.Attributes +{ + public class EndpointPriorityAttribute : Attribute + { + public EndpointPriorityAttribute(Priority priority) + { + Priority = priority; + } + + internal Priority Priority { get; } + } +} diff --git a/src/Farfetch.LoadShedding.AspNetCore/Configurators/LoadSheddingOptions.cs b/src/Farfetch.LoadShedding.AspNetCore/Configurators/LoadSheddingOptions.cs new file mode 100644 index 0000000..7c81234 --- /dev/null +++ b/src/Farfetch.LoadShedding.AspNetCore/Configurators/LoadSheddingOptions.cs @@ -0,0 +1,45 @@ +using Farfetch.LoadShedding.AspNetCore.Options; +using Farfetch.LoadShedding.Calculators; +using Farfetch.LoadShedding.Events; +using System; +using System.Collections.Generic; + +namespace Farfetch.LoadShedding.AspNetCore.Configurators +{ + /// + /// Represents the LoadShedding Configurator. + /// + public class LoadSheddingOptions + { + internal LoadSheddingOptions() + { + this.AdaptativeLimiter = new AdaptativeLimiterOptions(); + this.EventSubscriptions = new List>(); + } + + /// + /// Gets adaptative limiter options. + /// + public AdaptativeLimiterOptions AdaptativeLimiter { get; } + + public IQueueSizeCalculator QueueSizeCalculator { internal get; set; } + + internal IList> EventSubscriptions { get; } + + /// + /// Subscribes the global LoadShedding events. + /// + /// The action to configure the event subscribers. + public void SubscribeEvents(Action eventsDelegate) + { + this.EventSubscriptions.Add(eventsDelegate); + } + + /// + /// Subscribes the global LoadShedding events. + /// + /// The action to configure the event subscribers. + public void SubscribeEvents(Action eventsDelegate) + => this.SubscribeEvents((_, events) => eventsDelegate?.Invoke(events)); + } +} diff --git a/src/Farfetch.LoadShedding.AspNetCore/Extensions/IApplicationBuilderExtensions.cs b/src/Farfetch.LoadShedding.AspNetCore/Extensions/IApplicationBuilderExtensions.cs new file mode 100644 index 0000000..6497c06 --- /dev/null +++ b/src/Farfetch.LoadShedding.AspNetCore/Extensions/IApplicationBuilderExtensions.cs @@ -0,0 +1,47 @@ +using Farfetch.LoadShedding.AspNetCore.Configurators; +using Farfetch.LoadShedding.AspNetCore.Middlewares; +using Farfetch.LoadShedding.AspNetCore.Resolvers; +using Farfetch.LoadShedding.Builders; +using Microsoft.Extensions.DependencyInjection; + +namespace Microsoft.AspNetCore.Builder +{ + /// + /// Extension to IApplicationBuilder in order to easily configure the UseLoadShedding. + /// + public static class IApplicationBuilderExtensions + { + /// + /// Extension to add LoadShedding middlewares to the request pipeline. + /// + /// The IApplicationBuilder instance. 
+ /// IApplicationBuilder + public static IApplicationBuilder UseLoadShedding(this IApplicationBuilder appBuilder) + { + var options = appBuilder.ApplicationServices.GetService(); + + if (options == null) + { + options = new LoadSheddingOptions(); + } + + var adaptativeLimiter = new AdaptativeLimiterBuilder() + .WithOptions(options.AdaptativeLimiter.ConcurrencyOptions) + .WithCustomQueueSizeCalculator(options.QueueSizeCalculator) + .SubscribeEvents(events => + { + foreach (var listener in options.EventSubscriptions) + { + listener.Invoke(appBuilder.ApplicationServices, events); + } + }) + .Build(); + + appBuilder.UseMiddleware( + adaptativeLimiter, + options.AdaptativeLimiter.PriorityResolver ?? new DefaultPriorityResolver()); + + return appBuilder; + } + } +} diff --git a/src/Farfetch.LoadShedding.AspNetCore/Extensions/IServiceCollectionExtensions.cs b/src/Farfetch.LoadShedding.AspNetCore/Extensions/IServiceCollectionExtensions.cs new file mode 100644 index 0000000..3145d35 --- /dev/null +++ b/src/Farfetch.LoadShedding.AspNetCore/Extensions/IServiceCollectionExtensions.cs @@ -0,0 +1,59 @@ +using Farfetch.LoadShedding.AspNetCore.Configurators; +using Microsoft.AspNetCore.Http; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.DependencyInjection.Extensions; +using System; + +namespace Microsoft.AspNetCore.Builder +{ + /// + /// Extension to IServiceCollection in order to easily configure the AddLoadShedding. + /// + public static class IServiceCollectionExtensions + { + /// + /// Extension to configure the LoadShedding options. + /// + /// The IServiceCollection instance. + /// The configuration action. + /// IServiceCollection + public static IServiceCollection AddLoadShedding( + this IServiceCollection services, + Action configDelegate) + { + services.TryAddSingleton(); + + services.AddSingleton(provider => + { + var configurator = new LoadSheddingOptions(); + + configDelegate?.Invoke(provider, configurator); + + return configurator; + }); + + return services; + } + + /// + /// Extension to configure the LoadShedding options. + /// + /// The IServiceCollection instance. + /// The configuration action. + /// IServiceCollection + public static IServiceCollection AddLoadShedding(this IServiceCollection services, Action configDelegate) + { + return services.AddLoadShedding((_, options) => configDelegate?.Invoke(options)); + } + + /// + /// Extension to configure the LoadShedding options. + /// + /// The IServiceCollection instance. 
+ /// IServiceCollection + public static IServiceCollection AddLoadShedding(this IServiceCollection services) + { + return services.AddLoadShedding(_ => { }); + } + } +} diff --git a/src/Farfetch.LoadShedding.AspNetCore/Farfetch.LoadShedding.AspNetCore.csproj b/src/Farfetch.LoadShedding.AspNetCore/Farfetch.LoadShedding.AspNetCore.csproj new file mode 100644 index 0000000..96c121c --- /dev/null +++ b/src/Farfetch.LoadShedding.AspNetCore/Farfetch.LoadShedding.AspNetCore.csproj @@ -0,0 +1,17 @@ + + + + netstandard2.0 + + + + + + + + + + + + + diff --git a/src/Farfetch.LoadShedding.AspNetCore/Middlewares/AdaptativeConcurrencyLimiterMiddleware.cs b/src/Farfetch.LoadShedding.AspNetCore/Middlewares/AdaptativeConcurrencyLimiterMiddleware.cs new file mode 100644 index 0000000..bd15eca --- /dev/null +++ b/src/Farfetch.LoadShedding.AspNetCore/Middlewares/AdaptativeConcurrencyLimiterMiddleware.cs @@ -0,0 +1,51 @@ +using System.Runtime.CompilerServices; +using System.Threading.Tasks; +using Farfetch.LoadShedding.AspNetCore.Resolvers; +using Farfetch.LoadShedding.Exceptions; +using Farfetch.LoadShedding.Limiters; +using Microsoft.AspNetCore.Http; + +[assembly: InternalsVisibleTo("Farfetch.LoadShedding.AspNetCore.Tests")] +[assembly: InternalsVisibleTo("Farfetch.LoadShedding.BenchmarkTests")] + +namespace Farfetch.LoadShedding.AspNetCore.Middlewares +{ + internal class AdaptativeConcurrencyLimiterMiddleware + { + private readonly RequestDelegate _next; + private readonly IAdaptativeConcurrencyLimiter _limiter; + private readonly IPriorityResolver _priorityResolver; + + public AdaptativeConcurrencyLimiterMiddleware( + RequestDelegate next, + IAdaptativeConcurrencyLimiter limiter, + IPriorityResolver priorityResolver) + { + this._next = next; + this._limiter = limiter; + this._priorityResolver = priorityResolver ?? new DefaultPriorityResolver(); + } + + public async Task InvokeAsync(HttpContext context) + { + try + { + var priority = await this._priorityResolver + .ResolveAsync(context) + .ConfigureAwait(false); + + await this._limiter + .ExecuteAsync(priority, () => this._next.Invoke(context), context.RequestAborted) + .ConfigureAwait(false); + } + catch (LimitReachedException) + { + context.Response.StatusCode = StatusCodes.Status503ServiceUnavailable; + } + catch (QueueTimeoutException) + { + context.Response.StatusCode = StatusCodes.Status503ServiceUnavailable; + } + } + } +} diff --git a/src/Farfetch.LoadShedding.AspNetCore/Options/AdaptativeLimiterOptions.cs b/src/Farfetch.LoadShedding.AspNetCore/Options/AdaptativeLimiterOptions.cs new file mode 100644 index 0000000..aaa2a3e --- /dev/null +++ b/src/Farfetch.LoadShedding.AspNetCore/Options/AdaptativeLimiterOptions.cs @@ -0,0 +1,67 @@ +using Farfetch.LoadShedding.AspNetCore.Resolvers; +using Farfetch.LoadShedding.Configurations; +using Farfetch.LoadShedding.Tasks; +using Microsoft.AspNetCore.Http; +using System; +using System.Threading.Tasks; + +namespace Farfetch.LoadShedding.AspNetCore.Options +{ + /// + /// Options for adaptative concurrency limiter. + /// + public class AdaptativeLimiterOptions + { + internal AdaptativeLimiterOptions() + { + this.ConcurrencyOptions = new ConcurrencyOptions(); + } + + public ConcurrencyOptions ConcurrencyOptions { get; } + + ///// + ///// Sets the EndpointPriorityResolver, it loads the priority from the controller action attribute EndpointPriorityAttribute. 
+ ///// + ///// + public AdaptativeLimiterOptions UseEndpointPriorityResolver() + => this.UsePriorityResolver(new EndpointPriorityResolver()); + + + ///// + ///// Sets the HttpHeaderPriorityResolver, it converts the header X-Priority to the request priority (critical, normal, noncritical). + ///// + ///// AdaptativeLimiterOptions + public AdaptativeLimiterOptions UseHeaderPriorityResolver() + => this.UseHeaderPriorityResolver(HttpHeaderPriorityResolver.DefaultPriorityHeaderName); + + ///// + ///// Sets the HttpHeaderPriorityResolver, it converts the header {{headerName}} to the request priority (critical, normal, noncritical). + ///// + ///// The name of the header with the priority value. + ///// AdaptativeLimiterOptions + public AdaptativeLimiterOptions UseHeaderPriorityResolver(string headerName) + => this.UsePriorityResolver(new HttpHeaderPriorityResolver(headerName)); + + /// + /// Sets a custom priority resolver function. + /// + /// Delegate to resolver the request priority. + /// AdaptativeLimiterOptions + public AdaptativeLimiterOptions UsePriorityResolver(Func> priorityResolverFunc) + => this.UsePriorityResolver(new CustomPriorityResolver(priorityResolverFunc)); + + + /// + /// Sets a custom priority resolver instance. + /// + /// Priority resolver instance. + /// AdaptativeLimiterOptions + public AdaptativeLimiterOptions UsePriorityResolver(IPriorityResolver priorityResolver) + { + this.PriorityResolver = priorityResolver; + return this; + } + + internal IPriorityResolver PriorityResolver { get; set; } + } +} diff --git a/src/Farfetch.LoadShedding.AspNetCore/Resolvers/CustomPriorityResolver.cs b/src/Farfetch.LoadShedding.AspNetCore/Resolvers/CustomPriorityResolver.cs new file mode 100644 index 0000000..7aad518 --- /dev/null +++ b/src/Farfetch.LoadShedding.AspNetCore/Resolvers/CustomPriorityResolver.cs @@ -0,0 +1,22 @@ +using Farfetch.LoadShedding.Tasks; +using Microsoft.AspNetCore.Http; +using System; +using System.Threading.Tasks; + +namespace Farfetch.LoadShedding.AspNetCore.Resolvers +{ + internal class CustomPriorityResolver : IPriorityResolver + { + private readonly Func> _resolverFunc; + + public CustomPriorityResolver(Func> resolverFunc) + { + this._resolverFunc = resolverFunc; + } + + public Task ResolveAsync(HttpContext context) + { + return this._resolverFunc.Invoke(context); + } + } +} diff --git a/src/Farfetch.LoadShedding.AspNetCore/Resolvers/DefaultPriorityResolver.cs b/src/Farfetch.LoadShedding.AspNetCore/Resolvers/DefaultPriorityResolver.cs new file mode 100644 index 0000000..c8a5b29 --- /dev/null +++ b/src/Farfetch.LoadShedding.AspNetCore/Resolvers/DefaultPriorityResolver.cs @@ -0,0 +1,19 @@ +using Farfetch.LoadShedding.Tasks; +using Microsoft.AspNetCore.Http; +using System.Threading.Tasks; + +namespace Farfetch.LoadShedding.AspNetCore.Resolvers +{ + /// + /// This resolver always returns the default priority (Normal). 
+ /// + internal class DefaultPriorityResolver : IPriorityResolver + { + private static readonly Task s_defaultResult = Task.FromResult(Priority.Normal); + + public Task ResolveAsync(HttpContext context) + { + return s_defaultResult; + } + } +} diff --git a/src/Farfetch.LoadShedding.AspNetCore/Resolvers/EndpointPriorityResolver.cs b/src/Farfetch.LoadShedding.AspNetCore/Resolvers/EndpointPriorityResolver.cs new file mode 100644 index 0000000..90593b2 --- /dev/null +++ b/src/Farfetch.LoadShedding.AspNetCore/Resolvers/EndpointPriorityResolver.cs @@ -0,0 +1,29 @@ +using System.Threading.Tasks; +using Farfetch.LoadShedding.AspNetCore.Attributes; +using Farfetch.LoadShedding.Tasks; +using Microsoft.AspNetCore.Http; +using Microsoft.AspNetCore.Http.Features; + +namespace Farfetch.LoadShedding.AspNetCore.Resolvers +{ + internal class EndpointPriorityResolver : IPriorityResolver + { + public Task ResolveAsync(HttpContext context) + { + var priority = this.GetEndpointPriority(context); + + return Task.FromResult(priority); + } + + private Priority GetEndpointPriority(HttpContext context) + { + return context? + .Features? + .Get()? + .Endpoint? + .Metadata? + .GetMetadata()? + .Priority ?? Priority.Normal; + } + } +} diff --git a/src/Farfetch.LoadShedding.AspNetCore/Resolvers/HttpHeaderPriorityResolver.cs b/src/Farfetch.LoadShedding.AspNetCore/Resolvers/HttpHeaderPriorityResolver.cs new file mode 100644 index 0000000..9adb2a7 --- /dev/null +++ b/src/Farfetch.LoadShedding.AspNetCore/Resolvers/HttpHeaderPriorityResolver.cs @@ -0,0 +1,50 @@ +using Farfetch.LoadShedding.Tasks; +using Microsoft.AspNetCore.Http; +using System; +using System.Threading.Tasks; + +namespace Farfetch.LoadShedding.AspNetCore.Resolvers +{ + internal class HttpHeaderPriorityResolver : IPriorityResolver + { + private const string Separator = "-"; + + internal const string DefaultPriorityHeaderName = "X-Priority"; + + private readonly string _headerName; + + public HttpHeaderPriorityResolver() + : this(DefaultPriorityHeaderName) + { + } + + public HttpHeaderPriorityResolver(string headerName) + { + this._headerName = headerName; + } + + public Task ResolveAsync(HttpContext context) + { + if (!context.Request.Headers.TryGetValue(this._headerName, out var values)) + { + return Task.FromResult(Priority.Normal); + } + + var normalizedValue = this.NormalizeHeaderValue(values); + + if (!Enum.TryParse(normalizedValue, true, out Priority priority)) + { + priority = Priority.Normal; + } + + return Task.FromResult(priority); + } + + private string NormalizeHeaderValue(string headerValue) + { + return headerValue + .Replace(Separator, String.Empty) + .ToLower(); + } + } +} diff --git a/src/Farfetch.LoadShedding.AspNetCore/Resolvers/IPriorityResolver.cs b/src/Farfetch.LoadShedding.AspNetCore/Resolvers/IPriorityResolver.cs new file mode 100644 index 0000000..2233b6f --- /dev/null +++ b/src/Farfetch.LoadShedding.AspNetCore/Resolvers/IPriorityResolver.cs @@ -0,0 +1,19 @@ +using Farfetch.LoadShedding.Tasks; +using Microsoft.AspNetCore.Http; +using System.Threading.Tasks; + +namespace Farfetch.LoadShedding.AspNetCore.Resolvers +{ + /// + /// Priority resolver contract. + /// + public interface IPriorityResolver + { + /// + /// Resolves the task priority. + /// + /// The HttpContext instance. + /// The task priority. 
+ Task ResolveAsync(HttpContext context); + } +} diff --git a/src/Farfetch.LoadShedding.Prometheus/Farfetch.LoadShedding.Prometheus.csproj b/src/Farfetch.LoadShedding.Prometheus/Farfetch.LoadShedding.Prometheus.csproj new file mode 100644 index 0000000..7062bcd --- /dev/null +++ b/src/Farfetch.LoadShedding.Prometheus/Farfetch.LoadShedding.Prometheus.csproj @@ -0,0 +1,15 @@ + + + + netstandard2.0 + + + + + + + + + + + diff --git a/src/Farfetch.LoadShedding.Prometheus/IAdaptativeLimiterOptionsExtensions.cs b/src/Farfetch.LoadShedding.Prometheus/IAdaptativeLimiterOptionsExtensions.cs new file mode 100644 index 0000000..33cb2b4 --- /dev/null +++ b/src/Farfetch.LoadShedding.Prometheus/IAdaptativeLimiterOptionsExtensions.cs @@ -0,0 +1,180 @@ +using Farfetch.LoadShedding.AspNetCore.Configurators; +using Farfetch.LoadShedding.Prometheus; +using Farfetch.LoadShedding.Prometheus.Metrics; +using Farfetch.LoadShedding.Tasks; +using Microsoft.AspNetCore.Http; +using Microsoft.Extensions.DependencyInjection; +using Prometheus; +using System; + +namespace Farfetch.LoadShedding +{ + /// + /// Extension methods of IAdaptativeLimiterOptions + /// + public static class IAdaptativeLimiterOptionsExtensions + { + /// + /// Extension method to include prometheus metrics. + /// + /// The LoadSheddingOptions instance. + /// The Prometheus registry collector. + /// LoadSheddingOptions + public static LoadSheddingOptions AddMetrics( + this LoadSheddingOptions options, + Action optionsDelegate = null) + { + options.SubscribeEvents((provider, events) => + { + var metricsOptions = new LoadSheddingMetricOptions(Metrics.DefaultRegistry); + + optionsDelegate?.Invoke(metricsOptions); + + var concurrencyItemsGauge = new HttpRequestsConcurrencyItemsGauge(metricsOptions.Registry, metricsOptions.ConcurrencyItems); + var concurrencyLimitGauge = new HttpRequestsConcurrencyLimitGauge(metricsOptions.Registry, metricsOptions.ConcurrencyLimit); + var taskProcessingTimeHistogram = new HttpRequestsQueueTaskExecutionTimeHistogram(metricsOptions.Registry, metricsOptions.TaskExecutionTime); + var queueItemsGauge = new HttpRequestsQueueItemsGauge(metricsOptions.Registry, metricsOptions.QueueItems); + var queueLimitGauge = new HttpRequestsQueueLimitGauge(metricsOptions.Registry, metricsOptions.QueueLimit); + var queueTimeHistogram = new HttpRequestsQueueTimeHistogram(metricsOptions.Registry, metricsOptions.QueueTime); + var rejectedCounter = new HttpRequestsRejectedCounter(metricsOptions.Registry, metricsOptions.RequestRejected); + + var accessor = provider.GetRequiredService(); + + events.SubscribeConcurrencyLimitChangedEvent(concurrencyLimitGauge); + events.SubscribeQueueLimitChangedEvent(queueLimitGauge); + events.SubscribeItemProcessingEvent(concurrencyItemsGauge, accessor); + events.SubscribeItemProcessedEvent(concurrencyItemsGauge, taskProcessingTimeHistogram, accessor); + events.SubscribeItemEnqueuedEvent(queueItemsGauge, accessor); + events.SubscribeItemDequeuedEvent(queueItemsGauge, queueTimeHistogram, accessor); + events.SubscribeRejectedEvent(rejectedCounter, accessor); + }); + + return options; + } + + private static void SubscribeItemDequeuedEvent( + this Events.ILoadSheddingEvents events, + HttpRequestsQueueItemsGauge queueItemsGauge, + HttpRequestsQueueTimeHistogram queueTimeHistogram, + IHttpContextAccessor accessor) + { + if (!queueItemsGauge.IsEnabled && !queueTimeHistogram.IsEnabled) + { + return; + } + + events.ItemDequeued.Subscribe(args => + { + var method = accessor.GetMethod(); + + queueItemsGauge.Set(method, 
args.Priority.FormatPriority(), args.QueueCount); + queueTimeHistogram.Observe(method, args.Priority.FormatPriority(), args.QueueTime.TotalSeconds); + }); + } + + private static void SubscribeItemProcessedEvent( + this Events.ILoadSheddingEvents events, + HttpRequestsConcurrencyItemsGauge concurrencyItemsGauge, + HttpRequestsQueueTaskExecutionTimeHistogram taskProcessingTimeHistogram, + IHttpContextAccessor accessor) + { + if (!concurrencyItemsGauge.IsEnabled && !taskProcessingTimeHistogram.IsEnabled) + { + return; + } + + events.ItemProcessed.Subscribe(args => + { + var method = accessor.GetMethod(); + var priority = args.Priority.FormatPriority(); + + concurrencyItemsGauge.Set(method, priority, args.ConcurrencyCount); + taskProcessingTimeHistogram.Observe(method, priority, args.ProcessingTime.TotalSeconds); + }); + } + + private static void SubscribeConcurrencyLimitChangedEvent( + this Events.ILoadSheddingEvents events, + HttpRequestsConcurrencyLimitGauge concurrencyLimitGauge) + { + if (concurrencyLimitGauge.IsEnabled) + { + events.ConcurrencyLimitChanged.Subscribe(args => concurrencyLimitGauge.Set(args.Limit)); + } + } + + private static void SubscribeQueueLimitChangedEvent(this Events.ILoadSheddingEvents events, HttpRequestsQueueLimitGauge queueLimitGauge) + { + if (queueLimitGauge.IsEnabled) + { + events.QueueLimitChanged.Subscribe(args => queueLimitGauge.Set(args.Limit)); + } + } + + private static void SubscribeItemProcessingEvent( + this Events.ILoadSheddingEvents events, + HttpRequestsConcurrencyItemsGauge concurrencyItemsGauge, + IHttpContextAccessor accessor) + { + if (!concurrencyItemsGauge.IsEnabled) + { + return; + } + + events.ItemProcessing.Subscribe(args => + { + concurrencyItemsGauge.Set( + accessor.GetMethod(), + args.Priority.FormatPriority(), + args.ConcurrencyCount); + }); + } + + private static void SubscribeItemEnqueuedEvent(this Events.ILoadSheddingEvents events, HttpRequestsQueueItemsGauge queueItemsGauge, IHttpContextAccessor accessor) + { + if (!queueItemsGauge.IsEnabled) + { + return; + } + + events.ItemEnqueued.Subscribe(args => + { + queueItemsGauge.Set( + accessor.GetMethod(), + args.Priority.FormatPriority(), + args.QueueCount); + }); + } + + private static void SubscribeRejectedEvent(this Events.ILoadSheddingEvents events, HttpRequestsRejectedCounter rejectedCounter, IHttpContextAccessor accessor) + { + if (!rejectedCounter.IsEnabled) + { + return; + } + + events.Rejected.Subscribe(args => + { + rejectedCounter.Increment( + accessor.GetMethod(), + args.Priority.FormatPriority(), + args.Reason); + }); + } + + private static string GetMethod(this IHttpContextAccessor accessor) + { + if (accessor?.HttpContext?.Request is null) + { + return MetricsConstants.Unknown; + } + + return accessor.HttpContext.Request.Method.ToUpper(); + } + + private static string FormatPriority(this Priority priority) + { + return priority.ToString().ToLower(); + } + } +} diff --git a/src/Farfetch.LoadShedding.Prometheus/LoadSheddingMetricOptions.cs b/src/Farfetch.LoadShedding.Prometheus/LoadSheddingMetricOptions.cs new file mode 100644 index 0000000..e45d3c7 --- /dev/null +++ b/src/Farfetch.LoadShedding.Prometheus/LoadSheddingMetricOptions.cs @@ -0,0 +1,36 @@ +using Farfetch.LoadShedding.Prometheus.Metrics; +using Prometheus; + +namespace Farfetch.LoadShedding.Prometheus +{ + public class LoadSheddingMetricOptions + { + internal LoadSheddingMetricOptions(CollectorRegistry registry) + { + this.Registry = registry; + this.ConcurrencyItems = new MetricOptions(); + 
this.ConcurrencyLimit = new MetricOptions(); + this.QueueItems = new MetricOptions(); + this.QueueLimit = new MetricOptions(); + this.TaskExecutionTime = new MetricOptions(); + this.QueueTime = new MetricOptions(); + this.RequestRejected = new MetricOptions(); + } + + public CollectorRegistry Registry { get; set; } + + public MetricOptions ConcurrencyItems { get; } + + public MetricOptions ConcurrencyLimit { get; } + + public MetricOptions QueueItems { get; } + + public MetricOptions QueueLimit { get; } + + public MetricOptions TaskExecutionTime { get; } + + public MetricOptions QueueTime { get; } + + public MetricOptions RequestRejected { get; } + } +} diff --git a/src/Farfetch.LoadShedding.Prometheus/MetricOptions.cs b/src/Farfetch.LoadShedding.Prometheus/MetricOptions.cs new file mode 100644 index 0000000..88497ad --- /dev/null +++ b/src/Farfetch.LoadShedding.Prometheus/MetricOptions.cs @@ -0,0 +1,13 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Farfetch.LoadShedding.Prometheus +{ + public class MetricOptions + { + public bool Enabled { get; set; } = true; + + public string Name { get; set; } + } +} diff --git a/src/Farfetch.LoadShedding.Prometheus/Metrics/HttpRequestsConcurrencyItemsGauge.cs b/src/Farfetch.LoadShedding.Prometheus/Metrics/HttpRequestsConcurrencyItemsGauge.cs new file mode 100644 index 0000000..0a7a8a4 --- /dev/null +++ b/src/Farfetch.LoadShedding.Prometheus/Metrics/HttpRequestsConcurrencyItemsGauge.cs @@ -0,0 +1,37 @@ +using Prometheus; +using PrometheusBase = Prometheus; + +namespace Farfetch.LoadShedding.Prometheus.Metrics +{ + public class HttpRequestsConcurrencyItemsGauge : MetricBase + { + private const string Description = "The current number of executions concurrently"; + + internal HttpRequestsConcurrencyItemsGauge( + CollectorRegistry collectorRegistry, + MetricOptions options) + : base(collectorRegistry, options) + { + } + + protected override string DefaultName => "http_requests_concurrency_items_total"; + + public void Set(string method, string priority, double value) + { + this.Metric? 
+ .WithLabels(method, priority) + .Set(value); + } + + protected override Gauge Create(CollectorRegistry registry, MetricOptions options) + { + return PrometheusBase + .Metrics + .WithCustomRegistry(registry) + .CreateGauge(options.Name, Description, new PrometheusBase.GaugeConfiguration + { + LabelNames = new[] { MetricsConstants.MethodLabel, MetricsConstants.PriorityLabel } + }); + } + } +} diff --git a/src/Farfetch.LoadShedding.Prometheus/Metrics/HttpRequestsConcurrencyLimitGauge.cs b/src/Farfetch.LoadShedding.Prometheus/Metrics/HttpRequestsConcurrencyLimitGauge.cs new file mode 100644 index 0000000..72fc215 --- /dev/null +++ b/src/Farfetch.LoadShedding.Prometheus/Metrics/HttpRequestsConcurrencyLimitGauge.cs @@ -0,0 +1,32 @@ +using Prometheus; +using PrometheusBase = Prometheus; + +namespace Farfetch.LoadShedding.Prometheus.Metrics +{ + public class HttpRequestsConcurrencyLimitGauge : MetricBase + { + private const string Description = "The current concurrency limit"; + + internal HttpRequestsConcurrencyLimitGauge( + PrometheusBase.CollectorRegistry collectorRegistry, + MetricOptions options) + : base(collectorRegistry, options) + { + } + + protected override string DefaultName => "http_requests_concurrency_limit_total"; + + public void Set(double value) + { + this.Metric?.Set(value); + } + + protected override Gauge Create(CollectorRegistry registry, MetricOptions options) + { + return PrometheusBase + .Metrics + .WithCustomRegistry(registry) + .CreateGauge(options.Name, Description); + } + } +} diff --git a/src/Farfetch.LoadShedding.Prometheus/Metrics/HttpRequestsQueueItemsGauge.cs b/src/Farfetch.LoadShedding.Prometheus/Metrics/HttpRequestsQueueItemsGauge.cs new file mode 100644 index 0000000..641af92 --- /dev/null +++ b/src/Farfetch.LoadShedding.Prometheus/Metrics/HttpRequestsQueueItemsGauge.cs @@ -0,0 +1,41 @@ +using Prometheus; +using PrometheusBase = Prometheus; + +namespace Farfetch.LoadShedding.Prometheus.Metrics +{ + public class HttpRequestsQueueItemsGauge : MetricBase + { + private const string Description = "The current number of items waiting to be processed in the queue"; + + internal HttpRequestsQueueItemsGauge( + CollectorRegistry collectorRegistry, + MetricOptions options) + : base(collectorRegistry, options) + { + } + + protected override string DefaultName => "http_requests_queue_items_total"; + + public void Set(string method, string priority, double value) + { + this.Metric? 
+ .WithLabels(method, priority) + .Set(value); + } + + protected override Gauge Create(CollectorRegistry registry, MetricOptions options) + { + return PrometheusBase + .Metrics + .WithCustomRegistry(registry) + .CreateGauge(options.Name, Description, new GaugeConfiguration + { + LabelNames = new[] + { + MetricsConstants.MethodLabel, + MetricsConstants.PriorityLabel + } + }); + } + } +} diff --git a/src/Farfetch.LoadShedding.Prometheus/Metrics/HttpRequestsQueueLimitGauge.cs b/src/Farfetch.LoadShedding.Prometheus/Metrics/HttpRequestsQueueLimitGauge.cs new file mode 100644 index 0000000..b46b251 --- /dev/null +++ b/src/Farfetch.LoadShedding.Prometheus/Metrics/HttpRequestsQueueLimitGauge.cs @@ -0,0 +1,32 @@ +using Prometheus; +using PrometheusBase = Prometheus; + +namespace Farfetch.LoadShedding.Prometheus.Metrics +{ + public class HttpRequestsQueueLimitGauge : MetricBase + { + private const string Description = "The current queue limit size"; + + internal HttpRequestsQueueLimitGauge( + CollectorRegistry collectorRegistry, + MetricOptions options) + : base(collectorRegistry, options) + { + } + + protected override string DefaultName => "http_requests_queue_limit_total"; + + public void Set(double value) + { + this.Metric?.Set(value); + } + + protected override Gauge Create(CollectorRegistry registry, MetricOptions options) + { + return PrometheusBase + .Metrics + .WithCustomRegistry(registry) + .CreateGauge(options.Name, Description); + } + } +} diff --git a/src/Farfetch.LoadShedding.Prometheus/Metrics/HttpRequestsQueueTaskExecutionTimeHistogram.cs b/src/Farfetch.LoadShedding.Prometheus/Metrics/HttpRequestsQueueTaskExecutionTimeHistogram.cs new file mode 100644 index 0000000..8b1704e --- /dev/null +++ b/src/Farfetch.LoadShedding.Prometheus/Metrics/HttpRequestsQueueTaskExecutionTimeHistogram.cs @@ -0,0 +1,38 @@ +using Prometheus; +using PrometheusBase = Prometheus; + +namespace Farfetch.LoadShedding.Prometheus.Metrics +{ + public class HttpRequestsQueueTaskExecutionTimeHistogram : MetricBase + { + private const string Description = "The time each request spent in processing the task"; + + internal HttpRequestsQueueTaskExecutionTimeHistogram( + CollectorRegistry collectorRegistry, + MetricOptions options) + : base(collectorRegistry, options) + { + } + + protected override string DefaultName => "http_requests_task_processing_time_seconds"; + + public void Observe(string method, string priority, double value) + { + this.Metric? 
+ .WithLabels(method, priority) + .Observe(value); + } + + protected override Histogram Create(CollectorRegistry registry, MetricOptions options) + { + return PrometheusBase + .Metrics + .WithCustomRegistry(registry) + .CreateHistogram(options.Name, Description, new PrometheusBase.HistogramConfiguration + { + LabelNames = new[] { MetricsConstants.MethodLabel, MetricsConstants.PriorityLabel }, + Buckets = Histogram.ExponentialBuckets(0.001, 2, 20) + }); + } + } +} diff --git a/src/Farfetch.LoadShedding.Prometheus/Metrics/HttpRequestsQueueTimeHistogram.cs b/src/Farfetch.LoadShedding.Prometheus/Metrics/HttpRequestsQueueTimeHistogram.cs new file mode 100644 index 0000000..bde5f7e --- /dev/null +++ b/src/Farfetch.LoadShedding.Prometheus/Metrics/HttpRequestsQueueTimeHistogram.cs @@ -0,0 +1,38 @@ +using Prometheus; +using PrometheusBase = Prometheus; + +namespace Farfetch.LoadShedding.Prometheus.Metrics +{ + public class HttpRequestsQueueTimeHistogram : MetricBase + { + private const string Description = "The time each request spent in the queue until its executed"; + + internal HttpRequestsQueueTimeHistogram( + PrometheusBase.CollectorRegistry collectorRegistry, + MetricOptions options) + : base(collectorRegistry, options) + { + } + + protected override string DefaultName => "http_requests_queue_time_seconds"; + + public void Observe(string method, string priority, double value) + { + this.Metric? + .WithLabels(method, priority) + .Observe(value); + } + + protected override Histogram Create(CollectorRegistry registry, MetricOptions options) + { + return PrometheusBase + .Metrics + .WithCustomRegistry(registry) + .CreateHistogram(options.Name, Description, new PrometheusBase.HistogramConfiguration + { + LabelNames = new[] { MetricsConstants.MethodLabel, MetricsConstants.PriorityLabel }, + Buckets = Histogram.ExponentialBuckets(0.0005, 2, 20) + }); + } + } +} diff --git a/src/Farfetch.LoadShedding.Prometheus/Metrics/HttpRequestsRejectedCounter.cs b/src/Farfetch.LoadShedding.Prometheus/Metrics/HttpRequestsRejectedCounter.cs new file mode 100644 index 0000000..a2caf47 --- /dev/null +++ b/src/Farfetch.LoadShedding.Prometheus/Metrics/HttpRequestsRejectedCounter.cs @@ -0,0 +1,39 @@ +using Prometheus; +using PrometheusBase = Prometheus; + +namespace Farfetch.LoadShedding.Prometheus.Metrics +{ + public class HttpRequestsRejectedCounter : MetricBase + { + private const string Description = "The number of requests rejected because the queue limit is reached"; + + internal HttpRequestsRejectedCounter( + CollectorRegistry collectorRegistry, + MetricOptions options) + : base(collectorRegistry, options) + { + } + + protected override string DefaultName => "http_requests_rejected_total"; + + public void Increment(string method, string priority, string reason) + { + this.Metric?.WithLabels(method, priority, reason).Inc(); + } + + protected override Counter Create(CollectorRegistry registry, MetricOptions options) + { + return PrometheusBase.Metrics + .WithCustomRegistry(registry) + .CreateCounter(options.Name ?? 
DefaultName, Description, new CounterConfiguration + { + LabelNames = new[] + { + MetricsConstants.MethodLabel, + MetricsConstants.PriorityLabel, + MetricsConstants.ReasonLabel + } + }); + } + } +} diff --git a/src/Farfetch.LoadShedding.Prometheus/Metrics/MetricBase.cs b/src/Farfetch.LoadShedding.Prometheus/Metrics/MetricBase.cs new file mode 100644 index 0000000..96e5acd --- /dev/null +++ b/src/Farfetch.LoadShedding.Prometheus/Metrics/MetricBase.cs @@ -0,0 +1,27 @@ +using Prometheus; + +namespace Farfetch.LoadShedding.Prometheus.Metrics +{ + public abstract class MetricBase + { + protected abstract string DefaultName { get; } + + protected MetricBase(CollectorRegistry registry, MetricOptions options) + { + this.IsEnabled = options.Enabled; + + if (options.Enabled) + { + options.Name = options.Name ?? this.DefaultName; + + this.Metric = this.Create(registry, options); + } + } + + public bool IsEnabled { get; } + + protected T Metric { get; } + + protected abstract T Create(CollectorRegistry registry, MetricOptions options); + } +} diff --git a/src/Farfetch.LoadShedding.Prometheus/MetricsConstants.cs b/src/Farfetch.LoadShedding.Prometheus/MetricsConstants.cs new file mode 100644 index 0000000..b9be207 --- /dev/null +++ b/src/Farfetch.LoadShedding.Prometheus/MetricsConstants.cs @@ -0,0 +1,13 @@ +namespace Farfetch.LoadShedding.Prometheus +{ + internal static class MetricsConstants + { + public const string Unknown = "UNKNOWN"; + + public const string MethodLabel = "method"; + + public const string PriorityLabel = "priority"; + + public const string ReasonLabel = "reason"; + } +} diff --git a/src/Farfetch.LoadShedding/Builders/AdaptativeLimiterBuilder.cs b/src/Farfetch.LoadShedding/Builders/AdaptativeLimiterBuilder.cs new file mode 100644 index 0000000..e3ac1f9 --- /dev/null +++ b/src/Farfetch.LoadShedding/Builders/AdaptativeLimiterBuilder.cs @@ -0,0 +1,89 @@ +using System; +using Farfetch.LoadShedding.Calculators; +using Farfetch.LoadShedding.Configurations; +using Farfetch.LoadShedding.Limiters; +using Farfetch.LoadShedding.Events; + +namespace Farfetch.LoadShedding.Builders +{ + /// + /// A Builder to configure an Adaptative Concurrency Limiter. + /// + public class AdaptativeLimiterBuilder + { + private readonly ILoadSheddingEvents _events; + private readonly ILimitCalculator _limitCalculator; + private IQueueSizeCalculator _queueSizeCalculator; + + private ConcurrencyOptions _options; + + /// + /// Constructor that creates ConcurrencyOptions, MetricsManager, LimitCalculator, and QueueSizeCalculator properties by default. + /// + public AdaptativeLimiterBuilder() + { + this._options = new ConcurrencyOptions(); + this._events = new LoadSheddingEvents(); + this._limitCalculator = new GradientLimitCalculator(this._options); + } + + /// + /// Sets the Concurrency Options configured by the client. + /// + /// An action to configure the ConcurrencyOptions. + /// The AdaptativeLimiterBuilder updated with the options. + public AdaptativeLimiterBuilder WithOptions(Action options) + { + options?.Invoke(this._options); + return this; + } + + /// + /// Sets the Concurrency Options configured by the client. + /// + /// An action to configure the ConcurrencyOptions. + /// The AdaptativeLimiterBuilder updated with the options. + public AdaptativeLimiterBuilder WithOptions(ConcurrencyOptions options) + { + this._options = options; + return this; + } + + /// + /// Allows to subscribe limiter events. + /// + /// An action to configure what to do when a events is raised. 
+ /// The AdaptativeLimiterBuilder updated with a events listener. + public AdaptativeLimiterBuilder SubscribeEvents(Action eventsListener) + { + eventsListener?.Invoke(this._events); + return this; + } + + /// + /// Sets to a custom Queue Size Calculator configured by the client. + /// + /// An implementation of a IQueueSizeCalculator + /// The AdaptativeLimiterBuilder updated with a custom queue size calculator. + public AdaptativeLimiterBuilder WithCustomQueueSizeCalculator(IQueueSizeCalculator queueSizeCalculator) + { + this._queueSizeCalculator = queueSizeCalculator; + return this; + } + + /// + /// Creates an instance of an AdaptativeConcurrencyLimiter with configured properties. + /// + /// An intance of an AdaptativeConcurrencyLimiter. + public IAdaptativeConcurrencyLimiter Build() + { + this._options.Validate(); + + return new AdaptativeConcurrencyLimiter( + this._options, + this._limitCalculator, + this._queueSizeCalculator ?? new SquareRootQueueCalculator(this._options), + this._events); + } + } +} diff --git a/src/Farfetch.LoadShedding/Calculators/GradientLimitCalculator.cs b/src/Farfetch.LoadShedding/Calculators/GradientLimitCalculator.cs new file mode 100644 index 0000000..96cee14 --- /dev/null +++ b/src/Farfetch.LoadShedding/Calculators/GradientLimitCalculator.cs @@ -0,0 +1,72 @@ +using System; +using Farfetch.LoadShedding.Configurations; + +namespace Farfetch.LoadShedding.Calculators +{ + internal class GradientLimitCalculator : ILimitCalculator + { + private const double SmoothingFactor = 0.2; + + private readonly ConcurrencyOptions _options; + + public GradientLimitCalculator(ConcurrencyOptions options) + { + this._options = options; + } + + /// + /// Calculate the new concurrency limit dinamically. + /// + public int CalculateLimit(IConcurrencyContext context) + { + var currentLimit = context.MaxConcurrencyLimit; + + if (context.AvgRTT == 0) + { + return currentLimit; + } + + var gradient = this.CalculateConcurrencyLimitGradient(context); + + var newLimit = currentLimit * gradient + context.CurrentQueueCount; + + if (newLimit < currentLimit) + { + newLimit = SmoothNewLimit(currentLimit, newLimit); + } + + newLimit = Math.Min(this._options.MaxConcurrencyLimit, newLimit); + + if (KeepCurrentLimit(currentLimit, newLimit, context)) + { + return currentLimit; + } + + return (int)Math.Max(this._options.MinConcurrencyLimit, newLimit); + } + + /// + /// It calculates the new concurrency limit gradient by multiplying the minimum round trip + /// value with the tolerance, and dividing by the round trip average value. + /// The result should never be minor than 0.5 so that half of the requests are not lost. + /// + private double CalculateConcurrencyLimitGradient(IConcurrencyContext context) => + Math.Max(0.5, Math.Min(1, context.MinRTT * this._options.Tolerance / context.AvgRTT)); + + /// + /// If the current limit is greater than the new limit, is needed to smooth + /// the new limit value in order to avoid abrupt changes. + /// A reasonable value of 0.2 (20%) was chosen to do the smooth operation. + /// The result should always be greater or equals to the minimum concurrency limit. + /// + private double SmoothNewLimit(double currentLimit, double newLimit) => + (currentLimit * (1 - SmoothingFactor)) + (SmoothingFactor * newLimit); + + /// + /// The current limit should be kept if some criteria are accomplished. + /// This prevents from changing to a more unstable limit. 
+ /// + private static bool KeepCurrentLimit(double currentLimit, double newLimit, IConcurrencyContext context) => + newLimit < currentLimit && context.PreviousAvgRTT >= context.AvgRTT; + } +} diff --git a/src/Farfetch.LoadShedding/Calculators/ILimitCalculator.cs b/src/Farfetch.LoadShedding/Calculators/ILimitCalculator.cs new file mode 100644 index 0000000..3144282 --- /dev/null +++ b/src/Farfetch.LoadShedding/Calculators/ILimitCalculator.cs @@ -0,0 +1,17 @@ +using Farfetch.LoadShedding.Configurations; + +namespace Farfetch.LoadShedding.Calculators +{ + /// + /// Represents an interface that is responsible for the abstraction of different types of limit calculators. + /// + public interface ILimitCalculator + { + /// + /// Responsible for calculate and return a limit value. + /// + /// A concurrency context properties. + /// An integer limit based on the calculator logic and the current concurrency context. + int CalculateLimit(IConcurrencyContext context); + } +} diff --git a/src/Farfetch.LoadShedding/Calculators/IQueueSizeCalculator.cs b/src/Farfetch.LoadShedding/Calculators/IQueueSizeCalculator.cs new file mode 100644 index 0000000..f5ba064 --- /dev/null +++ b/src/Farfetch.LoadShedding/Calculators/IQueueSizeCalculator.cs @@ -0,0 +1,17 @@ +using Farfetch.LoadShedding.Configurations; + +namespace Farfetch.LoadShedding.Calculators +{ + /// + /// Represents an interface that is responsible for the abstraction of different types of queue size calculators. + /// + public interface IQueueSizeCalculator + { + /// + /// Responsible for calculate and return a queue size value. + /// + /// A concurrency context properties. + /// An integer value based on the calculator logic and the current concurrency context. + int CalculateQueueSize(IConcurrencyContext context); + } +} diff --git a/src/Farfetch.LoadShedding/Calculators/SquareRootQueueCalculator.cs b/src/Farfetch.LoadShedding/Calculators/SquareRootQueueCalculator.cs new file mode 100644 index 0000000..20ce223 --- /dev/null +++ b/src/Farfetch.LoadShedding/Calculators/SquareRootQueueCalculator.cs @@ -0,0 +1,22 @@ +using System; +using Farfetch.LoadShedding.Configurations; + +namespace Farfetch.LoadShedding.Calculators +{ + internal class SquareRootQueueCalculator : IQueueSizeCalculator + { + private readonly ConcurrencyOptions _options; + + public SquareRootQueueCalculator(ConcurrencyOptions options) + { + this._options = options; + } + + public int CalculateQueueSize(IConcurrencyContext context) + { + return (int)Math.Max( + this._options.MinQueueSize, + Math.Ceiling(Math.Sqrt(context.MaxConcurrencyLimit))); + } + } +} diff --git a/src/Farfetch.LoadShedding/Configurations/ConcurrencyContext.cs b/src/Farfetch.LoadShedding/Configurations/ConcurrencyContext.cs new file mode 100644 index 0000000..1ad97cc --- /dev/null +++ b/src/Farfetch.LoadShedding/Configurations/ConcurrencyContext.cs @@ -0,0 +1,34 @@ +using Farfetch.LoadShedding.Measures; +using Farfetch.LoadShedding.Tasks; + +namespace Farfetch.LoadShedding.Configurations +{ + internal class ConcurrencyContext : IConcurrencyContext + { + private readonly ITaskManager _taskManager; + private readonly RTTMeasures _measures; + + public ConcurrencyContext(ITaskManager taskManager, RTTMeasures measures) + { + this._taskManager = taskManager; + this._measures = measures; + } + + public int MaxConcurrencyLimit => this._taskManager.ConcurrencyLimit; + + public int MaxQueueSize => this._taskManager.QueueLimit; + + public int CurrentQueueCount => this._taskManager.QueueCount; + + public long AvgRTT => 
this._measures.AvgRTT; + + public long MinRTT => this._measures.MinRTT; + + public long PreviousAvgRTT { get; private set; } + + internal void Snapshot() + { + this.PreviousAvgRTT = this.AvgRTT; + } + } +} diff --git a/src/Farfetch.LoadShedding/Configurations/ConcurrencyOptions.cs b/src/Farfetch.LoadShedding/Configurations/ConcurrencyOptions.cs new file mode 100644 index 0000000..68ded1b --- /dev/null +++ b/src/Farfetch.LoadShedding/Configurations/ConcurrencyOptions.cs @@ -0,0 +1,85 @@ +using Farfetch.LoadShedding.Exceptions; +using System.Threading; + +namespace Farfetch.LoadShedding.Configurations +{ + /// + /// Settings to configure the Adaptative Limiter. + /// + public class ConcurrencyOptions + { + /// + /// Gets or sets the minimum concurrency limit. + /// + public int MinConcurrencyLimit { get; set; } = 5; + + /// + /// Gets or sets the initial concurrency limit. + /// + public int InitialConcurrencyLimit { get; set; } = 5; + + /// + /// Gets or sets the maximum concurrency limit. + /// + public int MaxConcurrencyLimit { get; set; } = 500; + + /// + /// Gets or sets the tolerance. + /// + public double Tolerance { get; set; } = 1.5; + + /// + /// Gets or sets the minimum queue size. + /// + public int MinQueueSize { get; set; } = 20; + + /// + /// Gets or sets the initial queue size. + /// + public int InitialQueueSize { get; set; } = 20; + + /// + /// Gets or sets the queue waiting timeout, when the timeout is reached the task will be canceled and will throw an OperationCanceledException. + /// + public int QueueTimeoutInMs { get; set; } = Timeout.Infinite; + + internal void Validate() + { + const int CommonMinThreshold = 0, MinToleranceThreshold = 1; + + if (this.IsLimitQueuePropertiesWithInvalidValues(CommonMinThreshold)) + { + throw new InvalidConfigurationException($"The value of {nameof(this.MinConcurrencyLimit)}, {nameof(this.InitialConcurrencyLimit)}, {nameof(this.MaxConcurrencyLimit)}," + + $" {nameof(this.MinQueueSize)}, or {nameof(this.InitialQueueSize)} should be greater than {CommonMinThreshold}"); + } + + if (this.Tolerance <= MinToleranceThreshold) + { + throw new InvalidConfigurationException($"The value of {nameof(this.Tolerance)} should be greater than {MinToleranceThreshold}"); + } + + if (this.MinConcurrencyLimit >= this.MaxConcurrencyLimit) + { + throw new InvalidConfigurationException($"The value of {nameof(this.MaxConcurrencyLimit)} should be greater than the {nameof(this.MinConcurrencyLimit)}"); + } + + if (this.MinConcurrencyLimit > this.InitialConcurrencyLimit || this.MaxConcurrencyLimit < this.InitialConcurrencyLimit) + { + throw new InvalidConfigurationException($"The value of {nameof(this.InitialConcurrencyLimit)} should be greater than {nameof(this.MinConcurrencyLimit)} " + + $"and less than {nameof(this.MaxConcurrencyLimit)}"); + } + + if (this.MinQueueSize > this.InitialQueueSize) + { + throw new InvalidConfigurationException($"The value of {nameof(this.InitialQueueSize)} should be greater than the {nameof(this.MinQueueSize)}"); + } + } + + private bool IsLimitQueuePropertiesWithInvalidValues(int threshold) => + this.MinConcurrencyLimit <= threshold + || this.InitialConcurrencyLimit <= threshold + || this.MaxConcurrencyLimit <= threshold + || this.MinQueueSize <= threshold + || this.InitialQueueSize <= threshold; + } +} diff --git a/src/Farfetch.LoadShedding/Configurations/IConcurrencyContext.cs b/src/Farfetch.LoadShedding/Configurations/IConcurrencyContext.cs new file mode 100644 index 0000000..9f3b834 --- /dev/null +++ 
b/src/Farfetch.LoadShedding/Configurations/IConcurrencyContext.cs @@ -0,0 +1,38 @@ +namespace Farfetch.LoadShedding.Configurations +{ + /// + /// Represents the concurrency context contract. + /// + public interface IConcurrencyContext + { + /// + /// Gets the maximum concurrency limit. + /// + int MaxConcurrencyLimit { get; } + + /// + /// Gets the maximum queue size. + /// + int MaxQueueSize { get; } + + /// + /// Gets the current queue count. + /// + int CurrentQueueCount { get; } + + /// + /// Gets the current average round trip time. + /// + long AvgRTT { get; } + + /// + /// Gets the min round trip time. + /// + long MinRTT { get; } + + /// + /// Gets the previous average round trip time. + /// + long PreviousAvgRTT { get; } + } +} diff --git a/src/Farfetch.LoadShedding/Constants/RejectionReasons.cs b/src/Farfetch.LoadShedding/Constants/RejectionReasons.cs new file mode 100644 index 0000000..7c94598 --- /dev/null +++ b/src/Farfetch.LoadShedding/Constants/RejectionReasons.cs @@ -0,0 +1,9 @@ +namespace Farfetch.LoadShedding.Constants +{ + internal static class RejectionReasons + { + public const string MaxQueueItems = "max_queue_items"; + + public const string QueueTimeout = "queue_timeout"; + } +} diff --git a/src/Farfetch.LoadShedding/Events/Args/ItemDequeuedEventArgs.cs b/src/Farfetch.LoadShedding/Events/Args/ItemDequeuedEventArgs.cs new file mode 100644 index 0000000..0b1b526 --- /dev/null +++ b/src/Farfetch.LoadShedding/Events/Args/ItemDequeuedEventArgs.cs @@ -0,0 +1,28 @@ +using System; +using Farfetch.LoadShedding.Tasks; + +namespace Farfetch.LoadShedding.Events.Args +{ + /// + /// Event args for the task dequeued event. + /// + public class ItemDequeuedEventArgs : ItemEventArgs + { + internal ItemDequeuedEventArgs(Priority priority, TimeSpan queueTime, int queueLimit, int queueCount) + : base(priority) + { + this.QueueTime = queueTime; + this.QueueLimit = queueLimit; + this.QueueCount = queueCount; + } + + /// + /// The time waiting in the queue. + /// + public TimeSpan QueueTime { get; } + + public int QueueLimit { get; } + + public int QueueCount { get; } + } +} diff --git a/src/Farfetch.LoadShedding/Events/Args/ItemEnqueuedEventArgs.cs b/src/Farfetch.LoadShedding/Events/Args/ItemEnqueuedEventArgs.cs new file mode 100644 index 0000000..dc04e4f --- /dev/null +++ b/src/Farfetch.LoadShedding/Events/Args/ItemEnqueuedEventArgs.cs @@ -0,0 +1,21 @@ +using Farfetch.LoadShedding.Tasks; + +namespace Farfetch.LoadShedding.Events.Args +{ + /// + /// Event args for the task enqueued event. + /// + public class ItemEnqueuedEventArgs : ItemEventArgs + { + internal ItemEnqueuedEventArgs(Priority priority, int queueLimit, int queueCount) + : base(priority) + { + this.QueueLimit = queueLimit; + this.QueueCount = queueCount; + } + + public int QueueLimit { get; } + + public int QueueCount { get; } + } +} diff --git a/src/Farfetch.LoadShedding/Events/Args/ItemEventArgs.cs b/src/Farfetch.LoadShedding/Events/Args/ItemEventArgs.cs new file mode 100644 index 0000000..e3c37de --- /dev/null +++ b/src/Farfetch.LoadShedding/Events/Args/ItemEventArgs.cs @@ -0,0 +1,20 @@ +using Farfetch.LoadShedding.Tasks; + +namespace Farfetch.LoadShedding.Events.Args +{ + /// + /// Event args for task rejected event. + /// + public class ItemEventArgs + { + protected ItemEventArgs(Priority priority) + { + this.Priority = priority; + } + + /// + /// The priority of the task. 
+ /// + public Priority Priority { get; } + } +} diff --git a/src/Farfetch.LoadShedding/Events/Args/ItemProcessedEventArgs.cs b/src/Farfetch.LoadShedding/Events/Args/ItemProcessedEventArgs.cs new file mode 100644 index 0000000..3e194b6 --- /dev/null +++ b/src/Farfetch.LoadShedding/Events/Args/ItemProcessedEventArgs.cs @@ -0,0 +1,33 @@ +using Farfetch.LoadShedding.Tasks; +using System; + +namespace Farfetch.LoadShedding.Events.Args +{ + /// + /// Event args for task processed event. + /// + public class ItemProcessedEventArgs : ItemEventArgs + { + internal ItemProcessedEventArgs(Priority priority, TimeSpan processingTime, int concurrencyLimit, int concurrencyCount) : base(priority) + { + this.ProcessingTime = processingTime; + this.ConcurrencyLimit = concurrencyLimit; + this.ConcurrencyCount = concurrencyCount; + } + + /// + /// Time spent to process the task. + /// + public TimeSpan ProcessingTime { get; } + + /// + /// The current concurrency limit. + /// + public int ConcurrencyLimit { get; } + + /// + /// The current concurrency items count. + /// + public int ConcurrencyCount { get; } + } +} diff --git a/src/Farfetch.LoadShedding/Events/Args/ItemProcessingEventArgs.cs b/src/Farfetch.LoadShedding/Events/Args/ItemProcessingEventArgs.cs new file mode 100644 index 0000000..a38892a --- /dev/null +++ b/src/Farfetch.LoadShedding/Events/Args/ItemProcessingEventArgs.cs @@ -0,0 +1,26 @@ +using Farfetch.LoadShedding.Tasks; + +namespace Farfetch.LoadShedding.Events.Args +{ + /// + /// Event args for task procssing event. + /// + public class ItemProcessingEventArgs : ItemEventArgs + { + internal ItemProcessingEventArgs(Priority priority, int concurrencyLimit, int concurrencyCount) : base(priority) + { + this.ConcurrencyLimit = concurrencyLimit; + this.ConcurrencyCount = concurrencyCount; + } + + /// + /// The current concurrency limit. + /// + public int ConcurrencyLimit { get; } + + /// + /// The current concurrency items count. + /// + public int ConcurrencyCount { get; } + } +} diff --git a/src/Farfetch.LoadShedding/Events/Args/ItemRejectedEventArgs.cs b/src/Farfetch.LoadShedding/Events/Args/ItemRejectedEventArgs.cs new file mode 100644 index 0000000..7199297 --- /dev/null +++ b/src/Farfetch.LoadShedding/Events/Args/ItemRejectedEventArgs.cs @@ -0,0 +1,18 @@ +using Farfetch.LoadShedding.Tasks; + +namespace Farfetch.LoadShedding.Events.Args +{ + /// + /// Event args for task rejected event. + /// + public class ItemRejectedEventArgs : ItemEventArgs + { + internal ItemRejectedEventArgs(Priority priority, string reason) + : base(priority) + { + Reason = reason; + } + + public string Reason { get; } + } +} diff --git a/src/Farfetch.LoadShedding/Events/Args/LimitChangedEventArgs.cs b/src/Farfetch.LoadShedding/Events/Args/LimitChangedEventArgs.cs new file mode 100644 index 0000000..04cd0b5 --- /dev/null +++ b/src/Farfetch.LoadShedding/Events/Args/LimitChangedEventArgs.cs @@ -0,0 +1,18 @@ +namespace Farfetch.LoadShedding.Events.Args +{ + /// + /// Events args for limits changed event. + /// + public class LimitChangedEventArgs + { + internal LimitChangedEventArgs(int limit) + { + this.Limit = limit; + } + + /// + /// The current limit. 
+ /// + public int Limit { get; } + } +} diff --git a/src/Farfetch.LoadShedding/Events/Event.cs b/src/Farfetch.LoadShedding/Events/Event.cs new file mode 100644 index 0000000..5ca1fd6 --- /dev/null +++ b/src/Farfetch.LoadShedding/Events/Event.cs @@ -0,0 +1,37 @@ +namespace Farfetch.LoadShedding +{ + using Farfetch.LoadShedding.Events; + using System; + using System.Collections.Generic; + + internal class Event : IEvent + { + private readonly List> _handlers = new List>(); + + public IEventSubscription Subscribe(Action handler) + { + this._handlers.Add(handler); + return new EventSubscription(() => this._handlers.Remove(handler)); + } + + public void Raise(TArg arg) + { + foreach (var handler in this._handlers) + { + try + { + if (handler is null) + { + continue; + } + + handler.Invoke(arg); + } + catch (Exception) + { + // Exceptions on the subscribers should be ignored. + } + } + } + } +} diff --git a/src/Farfetch.LoadShedding/Events/EventSubscription.cs b/src/Farfetch.LoadShedding/Events/EventSubscription.cs new file mode 100644 index 0000000..f533868 --- /dev/null +++ b/src/Farfetch.LoadShedding/Events/EventSubscription.cs @@ -0,0 +1,19 @@ +using System; + +namespace Farfetch.LoadShedding.Events +{ + internal class EventSubscription : IEventSubscription + { + private readonly Action _cancelHandler; + + public EventSubscription(Action cancelHandler) + { + this._cancelHandler = cancelHandler; + } + + public void Cancel() + { + this._cancelHandler.Invoke(); + } + } +} diff --git a/src/Farfetch.LoadShedding/Events/IEvent.cs b/src/Farfetch.LoadShedding/Events/IEvent.cs new file mode 100644 index 0000000..73446fa --- /dev/null +++ b/src/Farfetch.LoadShedding/Events/IEvent.cs @@ -0,0 +1,24 @@ +namespace Farfetch.LoadShedding +{ + using Farfetch.LoadShedding.Events; + using System; + + /// + /// Represents an Event to be subscribed. + /// + /// The argument expected by the event. + public interface IEvent + { + /// + /// Subscribes to the event. + /// + /// The handler to be called when the event is fired. + IEventSubscription Subscribe(Action handler); + + /// + /// Raises the event and notify all the subscribers. + /// + /// The event arguments. + void Raise(TArg arg); + } +} diff --git a/src/Farfetch.LoadShedding/Events/IEventSubscription.cs b/src/Farfetch.LoadShedding/Events/IEventSubscription.cs new file mode 100644 index 0000000..604ee27 --- /dev/null +++ b/src/Farfetch.LoadShedding/Events/IEventSubscription.cs @@ -0,0 +1,13 @@ +namespace Farfetch.LoadShedding.Events +{ + /// + /// Represents a reference to the event subscription. + /// + public interface IEventSubscription + { + /// + /// Cancels the event subscription. + /// + void Cancel(); + } +} diff --git a/src/Farfetch.LoadShedding/Events/ILoadSheddingEvents.cs b/src/Farfetch.LoadShedding/Events/ILoadSheddingEvents.cs new file mode 100644 index 0000000..3e16342 --- /dev/null +++ b/src/Farfetch.LoadShedding/Events/ILoadSheddingEvents.cs @@ -0,0 +1,42 @@ +using Farfetch.LoadShedding.Events.Args; + +namespace Farfetch.LoadShedding.Events +{ + public interface ILoadSheddingEvents + { + /// + /// Event triggered when the concurrency limit is changed. + /// + IEvent ConcurrencyLimitChanged { get; } + + /// + /// Event triggered when the queue limit is changed. + /// + IEvent QueueLimitChanged { get; } + + /// + /// Event triggered when some task has been started to process. + /// + IEvent ItemProcessing { get; } + + /// + /// Event triggered when some task has been finished. 
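Taken together, Event, EventSubscription, and the IEvent/IEventSubscription contracts give consumers a small pub/sub surface: Subscribe returns a cancellable handle, and Raise deliberately swallows subscriber exceptions so a faulty handler cannot break request processing. A minimal consumer sketch, assuming an ILoadSheddingEvents instance is already available (the ASP.NET Core integration tests near the end of this diff obtain one through options.SubscribeEvents); the logging itself is illustrative:

using System;
using Farfetch.LoadShedding.Events;

internal static class LoadSheddingEventLoggingSketch
{
    // Sketch: log every rejection until the caller decides to stop listening.
    public static IEventSubscription LogRejections(ILoadSheddingEvents events)
    {
        var subscription = events.Rejected.Subscribe(args =>
            Console.WriteLine($"Rejected a {args.Priority} request: {args.Reason}"));

        return subscription;
    }
}

Calling Cancel() on the returned handle removes the handler from the event's subscriber list again.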
+ /// + IEvent ItemProcessed { get; } + + /// + /// Event triggered when some task has been enqueued. + /// + IEvent ItemEnqueued { get; } + + /// + /// Event triggered when some task has been dequeued. + /// + IEvent ItemDequeued { get; } + + /// + /// Event triggered when some task has been rejected. + /// + IEvent Rejected { get; } + } +} diff --git a/src/Farfetch.LoadShedding/Events/LoadSheddingEvents.cs b/src/Farfetch.LoadShedding/Events/LoadSheddingEvents.cs new file mode 100644 index 0000000..14c02ab --- /dev/null +++ b/src/Farfetch.LoadShedding/Events/LoadSheddingEvents.cs @@ -0,0 +1,56 @@ +using Farfetch.LoadShedding.Events.Args; + +namespace Farfetch.LoadShedding.Events +{ + /// + /// Responsible for declare the contract of the events listener. + /// + internal class LoadSheddingEvents : ILoadSheddingEvents + { + public LoadSheddingEvents() + { + this.ConcurrencyLimitChanged = new Event(); + this.QueueLimitChanged = new Event(); + this.ItemProcessing = new Event(); + this.ItemProcessed = new Event(); + this.ItemEnqueued = new Event(); + this.ItemDequeued = new Event(); + this.Rejected = new Event(); + } + + /// + /// Event triggered when the concurrency limit is changed. + /// + public IEvent ConcurrencyLimitChanged { get; set; } + + /// + /// Event triggered when the queue limit is changed. + /// + public IEvent QueueLimitChanged { get; set; } + + /// + /// Event triggered when some task has been started to process. + /// + public IEvent ItemProcessing { get; set; } + + /// + /// Event triggered when some task has been finished. + /// + public IEvent ItemProcessed { get; set; } + + /// + /// Event triggered when some task has been enqueued. + /// + public IEvent ItemEnqueued { get; set; } + + /// + /// Event triggered when some task has been dequeued. + /// + public IEvent ItemDequeued { get; set; } + + /// + /// Event triggered when some task has been rejected. + /// + public IEvent Rejected { get; set; } + } +} diff --git a/src/Farfetch.LoadShedding/Exceptions/InvalidConfigurationException.cs b/src/Farfetch.LoadShedding/Exceptions/InvalidConfigurationException.cs new file mode 100644 index 0000000..de96e19 --- /dev/null +++ b/src/Farfetch.LoadShedding/Exceptions/InvalidConfigurationException.cs @@ -0,0 +1,18 @@ +using System; + +namespace Farfetch.LoadShedding.Exceptions +{ + /// + /// Represents an exception where some configuration is invalid. + /// + public class InvalidConfigurationException : Exception + { + /// + /// Constructs the custom exception. + /// + /// Receives a message to be passed to the exception. + public InvalidConfigurationException(string message) : base(message) + { + } + } +} diff --git a/src/Farfetch.LoadShedding/Exceptions/LimitReachedException.cs b/src/Farfetch.LoadShedding/Exceptions/LimitReachedException.cs new file mode 100644 index 0000000..0a445ac --- /dev/null +++ b/src/Farfetch.LoadShedding/Exceptions/LimitReachedException.cs @@ -0,0 +1,18 @@ +using System; + +namespace Farfetch.LoadShedding.Exceptions +{ + /// + /// Represents an exception where a maximum limit defined was reached. + /// + public class LimitReachedException : Exception + { + /// + /// Constructs the custom exception. + /// + /// Receives a message to be passed to the exception. 
+ public LimitReachedException(string message) : base(message) + { + } + } +} diff --git a/src/Farfetch.LoadShedding/Exceptions/QueueLimitReachedException.cs b/src/Farfetch.LoadShedding/Exceptions/QueueLimitReachedException.cs new file mode 100644 index 0000000..f0442d4 --- /dev/null +++ b/src/Farfetch.LoadShedding/Exceptions/QueueLimitReachedException.cs @@ -0,0 +1,18 @@ +namespace Farfetch.LoadShedding.Exceptions +{ + /// + /// Represents an exception where the queue limit defined was reached. + /// + public class QueueLimitReachedException : LimitReachedException + { + /// + /// Constructs the custom exception. + /// + /// The current queue limit. + public QueueLimitReachedException(int limit) + : base($"The maximum queue limit of {limit} is reached.") + { + + } + } +} diff --git a/src/Farfetch.LoadShedding/Exceptions/QueueTimeoutException.cs b/src/Farfetch.LoadShedding/Exceptions/QueueTimeoutException.cs new file mode 100644 index 0000000..a53c766 --- /dev/null +++ b/src/Farfetch.LoadShedding/Exceptions/QueueTimeoutException.cs @@ -0,0 +1,19 @@ +using System; + +namespace Farfetch.LoadShedding.Exceptions +{ + /// + /// Represents an exception where the queue timeout defined was reached. + /// + public class QueueTimeoutException : TimeoutException + { + /// + /// Constructs the custom exception. + /// + /// The current queue timeout. + public QueueTimeoutException(int timeout) + : base($"The maximum queue timeout of {timeout} was reached.") + { + } + } +} diff --git a/src/Farfetch.LoadShedding/Farfetch.LoadShedding.csproj b/src/Farfetch.LoadShedding/Farfetch.LoadShedding.csproj new file mode 100644 index 0000000..101cb50 --- /dev/null +++ b/src/Farfetch.LoadShedding/Farfetch.LoadShedding.csproj @@ -0,0 +1,13 @@ + + + + netstandard2.0 + + + + + + + + + diff --git a/src/Farfetch.LoadShedding/Limiters/AdaptativeConcurrencyLimiter.cs b/src/Farfetch.LoadShedding/Limiters/AdaptativeConcurrencyLimiter.cs new file mode 100644 index 0000000..7fccc96 --- /dev/null +++ b/src/Farfetch.LoadShedding/Limiters/AdaptativeConcurrencyLimiter.cs @@ -0,0 +1,119 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using Farfetch.LoadShedding.Calculators; +using Farfetch.LoadShedding.Configurations; +using Farfetch.LoadShedding.Measures; +using Farfetch.LoadShedding.Events; +using Farfetch.LoadShedding.Tasks; + +namespace Farfetch.LoadShedding.Limiters +{ + internal class AdaptativeConcurrencyLimiter : IAdaptativeConcurrencyLimiter, IDisposable + { + private static readonly TimeSpan s_updateCheckInterval = TimeSpan.FromMilliseconds(500); + + private const int MaxPercentageForRecovering = 10; + private const int MinPercentageForUpdatingLimits = 50; + + private readonly Timer _updater; + private readonly ITaskManager _taskManager; + private readonly ConcurrencyContext _context; + private readonly ILimitCalculator _limitCalculator; + private readonly IQueueSizeCalculator _queueCalculator; + private readonly RTTMeasures _measures; + + public AdaptativeConcurrencyLimiter( + ConcurrencyOptions concurrencyOptions, + ILimitCalculator limitCalculator, + IQueueSizeCalculator queueCalculator, + ILoadSheddingEvents events) + { + this._limitCalculator = limitCalculator; + this._queueCalculator = queueCalculator; + + this._measures = new RTTMeasures(concurrencyOptions.Tolerance); + + this._taskManager = new TaskManager( + concurrencyOptions.InitialConcurrencyLimit, + concurrencyOptions.InitialQueueSize, + concurrencyOptions.QueueTimeoutInMs, + events); + + this._context = new 
ConcurrencyContext(this._taskManager, this._measures); + this._updater = new Timer(_ => this.UpdateLimits(), null, TimeSpan.Zero, s_updateCheckInterval); + } + + /// + /// It is responsible for managing the adaptative semaphore for the current request. + /// As soon as the request is executed, the data is collected to determine the performance degradation. + /// + public Task ExecuteAsync(Func function, CancellationToken cancellationToken = default) + { + return this.ExecuteAsync(0, function, cancellationToken); + } + + /// + /// It is responsible for managing the adaptative semaphore for the current request. + /// As soon as the request is executed, the data is collected to determine the performance degradation. + /// + public async Task ExecuteAsync(Priority priority, Func function, CancellationToken cancellationToken = default) + { + using (var item = await _taskManager.AcquireAsync(priority, cancellationToken)) + { + try + { + var delayTask = Task.Delay(Timeout.Infinite, cancellationToken); + + var resultTask = await Task.WhenAny( + function.Invoke(), + delayTask); + + if (delayTask == resultTask) + { + cancellationToken.ThrowIfCancellationRequested(); + } + } + finally + { + item.Complete(); + + this._measures.AddSample(item.ProcessingTime.TotalMilliseconds); + } + } + } + + public void Dispose() + { + this._updater.Dispose(); + GC.SuppressFinalize(this); + } + + /// + /// Responsible for adapt the concurrency limit and the max queue size according with the service performance in runtime based. + /// + private void UpdateLimits() + { + try + { + var currentPercentage = this._taskManager.UsagePercentage; + + if (currentPercentage <= MaxPercentageForRecovering) + { + this._measures.RecoverFromLoad(); + return; + } + + if (currentPercentage >= MinPercentageForUpdatingLimits) + { + this._taskManager.ConcurrencyLimit = this._limitCalculator.CalculateLimit(this._context); + this._taskManager.QueueLimit = this._queueCalculator.CalculateQueueSize(this._context); + } + } + finally + { + this._context.Snapshot(); + } + } + } +} diff --git a/src/Farfetch.LoadShedding/Limiters/IAdaptativeConcurrencyLimiter.cs b/src/Farfetch.LoadShedding/Limiters/IAdaptativeConcurrencyLimiter.cs new file mode 100644 index 0000000..6cf332f --- /dev/null +++ b/src/Farfetch.LoadShedding/Limiters/IAdaptativeConcurrencyLimiter.cs @@ -0,0 +1,30 @@ +using Farfetch.LoadShedding.Tasks; +using System; +using System.Threading; +using System.Threading.Tasks; + +namespace Farfetch.LoadShedding.Limiters +{ + /// + /// Defines the contract that the limiters should follow. + /// + public interface IAdaptativeConcurrencyLimiter + { + /// + /// Responsible for managing the current request. + /// + /// The priority of the execution of the task. + /// A function that represents the request that has been processed. + /// A cancellation token is used to signal that the running operation should be stopped. + /// + Task ExecuteAsync(Priority priority, Func function, CancellationToken cancellationToken = default); + + /// + /// Responsible for managing the current request. + /// + /// A function that represents the request that has been processed. + /// A cancellation token is used to signal that the running operation should be stopped. 
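The IAdaptativeConcurrencyLimiter contract being declared here is the entire surface a direct caller needs: wrap the work in a Func<Task>, optionally pass a Priority, and handle the shedding exceptions declared earlier in this hunk. A minimal sketch, assuming direct use of the limiter outside the bundled ASP.NET Core middleware (which performs an equivalent 503 mapping, as its unit tests near the end of this diff verify); the method name and status codes are illustrative:

using System;
using System.Threading;
using System.Threading.Tasks;
using Farfetch.LoadShedding.Exceptions;
using Farfetch.LoadShedding.Limiters;
using Farfetch.LoadShedding.Tasks;

internal static class LimiterUsageSketch
{
    // Sketch: run a critical piece of work under the limiter and translate
    // shedding failures into an HTTP-style status code.
    public static async Task<int> ExecuteOrShedAsync(
        IAdaptativeConcurrencyLimiter limiter,
        Func<Task> work,
        CancellationToken token)
    {
        try
        {
            await limiter.ExecuteAsync(Priority.Critical, work, token);
            return 200;
        }
        catch (LimitReachedException)
        {
            // Covers QueueLimitReachedException: the queue was already full.
            return 503;
        }
        catch (QueueTimeoutException)
        {
            // The request waited in the queue longer than the configured timeout.
            return 503;
        }
    }
}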
+ /// + Task ExecuteAsync(Func function, CancellationToken cancellationToken = default); + } +} diff --git a/src/Farfetch.LoadShedding/Measures/RTTMeasures.cs b/src/Farfetch.LoadShedding/Measures/RTTMeasures.cs new file mode 100644 index 0000000..739be61 --- /dev/null +++ b/src/Farfetch.LoadShedding/Measures/RTTMeasures.cs @@ -0,0 +1,99 @@ +using System; +using System.Runtime.CompilerServices; +using System.Threading; + +[assembly: InternalsVisibleTo("Farfetch.LoadShedding.Tests")] + +namespace Farfetch.LoadShedding.Measures +{ + /// + /// Contains the round trip time measures. + /// + internal class RTTMeasures + { + /// + /// After a service overload, response times are likely to be higher than usual. + /// Therefore, it becomes necessary to restore the response time metrics to their normal values. + /// This can be achieved by gradually decreasing the avg RTT that acts as a + /// mechanism to bring the response time back to its normal level. + /// In this case it will decrease the avg RTT by 10% each iteration (normally 1 second) until it is stabilized. + /// + private const double StabilizationFactor = 0.9; + + private readonly double _tolerance; + + private long _numberOfExecutions; + private double _totalTime; + private long _avgRTT; + + /// + /// Initializes an instance of the class defining the tolerance parameter. + /// + /// A tolerance to control the trade-off between the minimum and the average RTT. + public RTTMeasures(double tolerance) + { + this._tolerance = tolerance; + this.Reset(); + } + + /// + /// Gets the average round trip time. + /// + public long AvgRTT => this._avgRTT; + + /// + /// Gets the total number of executions. + /// + public long TotalCount => this._numberOfExecutions; + + /// + /// Gets the minimum round trip time. + /// + public long MinRTT { get; private set; } = int.MaxValue; + + /// + /// Responsible for tracking specific data to determine performance degradation. + /// In each request: the round trip time is accumulated; the number of requests is increased; the average round trip time is calculated; + /// and the minimum round trip time is maintained. + /// + /// Receives the duration of a request. 
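To make the bookkeeping just described concrete, here is a small worked example, written as an xUnit test and assuming it lives in the test assembly granted InternalsVisibleTo above; the tolerance value is arbitrary. Three samples of 100 ms, 200 ms, and 60 ms leave AvgRTT at ceil(360 / 3) = 120, while MinRTT stays pinned at the lowest average observed so far, 100.

using Farfetch.LoadShedding.Measures;
using Xunit;

public class RTTMeasuresSketchTests
{
    [Fact]
    public void AddSample_KeepsRunningAverageAndFloor()
    {
        var measures = new RTTMeasures(2.0);

        measures.AddSample(100); // avg = 100, min = 100
        measures.AddSample(200); // avg = ceil(300 / 2) = 150, min stays 100
        measures.AddSample(60);  // avg = ceil(360 / 3) = 120, min stays 100

        Assert.Equal(120L, measures.AvgRTT);
        Assert.Equal(100L, measures.MinRTT);
        Assert.Equal(3L, measures.TotalCount);
    }
}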
+ public void AddSample(double durationInMs) + { + this._totalTime += durationInMs; + this._numberOfExecutions++; + + Interlocked.Exchange(ref this._avgRTT, (int)Math.Ceiling(this._totalTime / this._numberOfExecutions)); + + if (this.AvgRTT < this.MinRTT) + { + this.MinRTT = this.AvgRTT; + } + } + + internal void RecoverFromLoad() + { + if (this._isRecovered) + { + return; + } + + Interlocked.Exchange(ref this._totalTime, (int)Math.Floor(this._totalTime * StabilizationFactor)); + Interlocked.Exchange(ref this._avgRTT, (int)Math.Floor(this._totalTime / this._numberOfExecutions)); + + if (this._isRecovered) + { + this.Reset(); + } + } + + private bool _isRecovered => (this.MinRTT * this._tolerance) > this.AvgRTT; + + private void Reset() + { + this._numberOfExecutions = 0; + this._totalTime = 0; + this._avgRTT = 0; + this.MinRTT = int.MaxValue; + } + } +} diff --git a/src/Farfetch.LoadShedding/Tasks/ConcurrentCounter.cs b/src/Farfetch.LoadShedding/Tasks/ConcurrentCounter.cs new file mode 100644 index 0000000..1d3cf82 --- /dev/null +++ b/src/Farfetch.LoadShedding/Tasks/ConcurrentCounter.cs @@ -0,0 +1,76 @@ +namespace Farfetch.LoadShedding.Tasks +{ + internal class ConcurrentCounter + { + private readonly object _locker = new object(); + + private int _count = 0; + + public int Count => this._count; + + public int Limit { get; set; } + + public int AvailableCount + { + get + { + lock (this._locker) + { + return this.Limit - this._count; + } + } + } + + public double UsagePercentage + { + get + { + lock (this._locker) + { + return (this._count * 100.0) / this.Limit; + } + } + } + + public int Increment() + { + lock (this._locker) + { + this._count++; + + return _count; + } + } + + public bool TryIncrement(out int result) + { + var increased = false; + + lock (this._locker) + { + if (this._count < this.Limit) + { + this._count++; + increased = true; + } + + result = this._count; + } + + return increased; + } + + public int Decrement() + { + lock (this._locker) + { + if (this._count > 0) + { + this._count--; + } + + return this._count; + } + } + } +} diff --git a/src/Farfetch.LoadShedding/Tasks/ITaskManager.cs b/src/Farfetch.LoadShedding/Tasks/ITaskManager.cs new file mode 100644 index 0000000..d2db38e --- /dev/null +++ b/src/Farfetch.LoadShedding/Tasks/ITaskManager.cs @@ -0,0 +1,20 @@ +using System.Threading.Tasks; +using System.Threading; + +namespace Farfetch.LoadShedding.Tasks +{ + internal interface ITaskManager + { + int ConcurrencyLimit { get; set; } + + int ConcurrencyCount { get; } + + int QueueLimit { get; set; } + + int QueueCount { get; } + + double UsagePercentage { get; } + + Task AcquireAsync(Priority priority, CancellationToken cancellationToken = default); + } +} diff --git a/src/Farfetch.LoadShedding/Tasks/Priority.cs b/src/Farfetch.LoadShedding/Tasks/Priority.cs new file mode 100644 index 0000000..1790742 --- /dev/null +++ b/src/Farfetch.LoadShedding/Tasks/Priority.cs @@ -0,0 +1,12 @@ +namespace Farfetch.LoadShedding.Tasks +{ + /// + /// Task priorities. 
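ConcurrentCounter, a few screens up, is the lock-guarded primitive behind both the concurrency gate and the queue size: TryIncrement succeeds only while Count is below Limit, and Decrement never drops below zero. A quick illustrative walk-through (the type is internal, so this assumes code in the same assembly or in one of the test assemblies named in the InternalsVisibleTo attributes; the limit of 2 is arbitrary):

using Farfetch.LoadShedding.Tasks;

internal static class ConcurrentCounterSketch
{
    public static void Demo()
    {
        var counter = new ConcurrentCounter { Limit = 2 };

        counter.TryIncrement(out _);                // true  -> 1 in flight
        counter.TryIncrement(out _);                // true  -> 2 in flight
        var admitted = counter.TryIncrement(out _); // false -> over the limit, count unchanged

        counter.Decrement();                        // back to 1; never goes below zero

        System.Console.WriteLine(admitted);         // prints False
    }
}

This is the fast-path admission check that TaskManager, further down in this diff, builds on before falling back to the queue.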
+ /// + public enum Priority + { + Critical = 0, + Normal = 1, + NonCritical = 2 + } +} diff --git a/src/Farfetch.LoadShedding/Tasks/TaskItem.cs b/src/Farfetch.LoadShedding/Tasks/TaskItem.cs new file mode 100644 index 0000000..5f1f095 --- /dev/null +++ b/src/Farfetch.LoadShedding/Tasks/TaskItem.cs @@ -0,0 +1,109 @@ +using System; +using System.Diagnostics; +using System.Threading.Tasks; +using System.Threading; + +namespace Farfetch.LoadShedding.Tasks +{ + internal class TaskItem : IDisposable + { + private readonly TaskCompletionSource _taskSource; + private readonly Stopwatch _lifetime; + + private Stopwatch _waitingTime; + + public Priority Priority { get; private set; } + + public TaskItem(Priority priority) + { + this.Priority = priority; + this._lifetime = Stopwatch.StartNew(); + this._taskSource = new TaskCompletionSource(); + } + + public TaskResult Status { get; private set; } = TaskResult.Pending; + + public TimeSpan WaitingTime => this._waitingTime?.Elapsed ?? TimeSpan.Zero; + + public TimeSpan ProcessingTime => this._lifetime.Elapsed - this.WaitingTime; + + public Action OnCompleted { get; set; } + + public bool IsWaiting => this.Status == TaskResult.Pending || this.Status == TaskResult.Waiting; + + public async Task WaitAsync(int timeout, CancellationToken token) + { + if (this._taskSource.Task.IsCompleted) + { + return; + } + + this.Status = TaskResult.Waiting; + + this._waitingTime = Stopwatch.StartNew(); + + var timeoutTask = Task.Delay(timeout, token); + + var returnedTask = await Task + .WhenAny(this._taskSource.Task, timeoutTask) + .ConfigureAwait(false); + + this._waitingTime.Stop(); + + if (returnedTask == timeoutTask) + { + this.Timeout(); + } + } + + private void Timeout() + { + if (this.TryChangeStatus(TaskResult.Timeout)) + { + this._lifetime.Stop(); + } + } + + public void Reject() + { + if (this.TryChangeStatus(TaskResult.Rejected)) + { + this._lifetime.Stop(); + } + } + + public void Complete() + { + if (this.TryChangeStatus(TaskResult.Completed)) + { + this._lifetime.Stop(); + this.OnCompleted?.Invoke(); + } + } + + public void Process() + { + this.Status = TaskResult.Processing; + this._taskSource.TrySetResult(true); + } + + public void Dispose() + { + this._waitingTime?.Stop(); + this._lifetime.Stop(); + } + + private bool TryChangeStatus(TaskResult status) + { + if (this.Status == status) + { + return false; + } + + this.Status = status; + this._taskSource.TrySetResult(true); + + return true; + } + } +} diff --git a/src/Farfetch.LoadShedding/Tasks/TaskItemList.cs b/src/Farfetch.LoadShedding/Tasks/TaskItemList.cs new file mode 100644 index 0000000..2414cbc --- /dev/null +++ b/src/Farfetch.LoadShedding/Tasks/TaskItemList.cs @@ -0,0 +1,65 @@ +using System.Collections.Generic; +using System.Linq; + +namespace Farfetch.LoadShedding.Tasks +{ + internal class TaskItemList + { + private readonly object _locker = new object(); + + private readonly IList _items = new List(); + + public bool HasItems => this._items.Count > 0; + + public void Add(TaskItem item) + { + lock (this._locker) + { + this._items.Add(item); + } + } + + public TaskItem Dequeue() + { + lock (this._locker) + { + var item = this._items.FirstOrDefault(x => x.IsWaiting); + + this._items.Remove(item); + + return item; + } + } + + public TaskItem DequeueLast() + { + lock (this._locker) + { + var item = this._items.LastOrDefault(x => x.IsWaiting); + + if (item != null) + { + this._items.Remove(item); + } + + return item; + } + } + + public bool Remove(TaskItem item) + { + lock (this._locker) + { + return 
this._items.Remove(item); + } + } + + public void Clear() + { + lock (this._locker) + { + this._items.Clear(); + } + } + } +} diff --git a/src/Farfetch.LoadShedding/Tasks/TaskManager.cs b/src/Farfetch.LoadShedding/Tasks/TaskManager.cs new file mode 100644 index 0000000..d1fe967 --- /dev/null +++ b/src/Farfetch.LoadShedding/Tasks/TaskManager.cs @@ -0,0 +1,200 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using Farfetch.LoadShedding.Exceptions; +using Farfetch.LoadShedding.Events; +using Farfetch.LoadShedding.Events.Args; +using Farfetch.LoadShedding.Constants; + +namespace Farfetch.LoadShedding.Tasks +{ + internal class TaskManager : ITaskManager + { + private readonly TaskQueue _taskQueue; + + private readonly ConcurrentCounter _counter = new ConcurrentCounter(); + + private readonly int _queueTimeout; + private readonly ILoadSheddingEvents _events; + + public TaskManager( + int concurrencyLimit, + int maxQueueSize, + int queueTimeout = Timeout.Infinite, + ILoadSheddingEvents events = null) + { + this._counter.Limit = concurrencyLimit; + this._queueTimeout = queueTimeout; + + this._events = events; + + this._taskQueue = new TaskQueue(maxQueueSize) + { + OnItemEnqueued = (count, item) => this.NotifyItemEnqueued(count, item), + OnItemDequeued = (count, item) => this.NotifyItemDequeued(count, item) + }; + + this._events?.ConcurrencyLimitChanged?.Raise(new LimitChangedEventArgs(this._counter.Limit)); + this._events?.QueueLimitChanged?.Raise(new LimitChangedEventArgs(maxQueueSize)); + } + + public int ConcurrencyLimit + { + get => this._counter.Limit; + set + { + if (value < 1) + { + throw new ArgumentException("Must be greater than or equal to 1.", nameof(this.ConcurrencyLimit)); + } + + if (value == this._counter.Limit) + { + return; + } + + this._counter.Limit = value; + + this.NotifyConcurrencyLimitChanged(); + this.ProcessPendingTasks(); + } + } + + public int QueueLimit + { + get => this._taskQueue.Limit; + set + { + if (value == this._taskQueue.Limit) + { + return; + } + + this._taskQueue.Limit = value; + + this._events?.QueueLimitChanged?.Raise(new LimitChangedEventArgs(this._taskQueue.Limit)); + } + } + + public int QueueCount => this._taskQueue.Count; + + public int ConcurrencyCount => this._counter.Count; + + public double UsagePercentage => this._counter.UsagePercentage; + + public async Task AcquireAsync(Priority priority, CancellationToken cancellationToken = default) + { + var item = this.CreateTask(priority); + + if (this._counter.TryIncrement(out var currentCount)) + { + item.Process(); + + this.NotifyItemProcessing(item, currentCount); + + return item; + } + + this._taskQueue.Enqueue(item); + + if (item.Status != TaskResult.Rejected) + { + try + { + await item + .WaitAsync(this._queueTimeout, cancellationToken) + .ConfigureAwait(false); + } + finally + { + this._taskQueue.Remove(item); + } + } + + switch (item.Status) + { + case TaskResult.Rejected: + this.NotifyItemRejected(item, RejectionReasons.MaxQueueItems); + throw new QueueLimitReachedException(this._taskQueue.Limit); + case TaskResult.Timeout: + cancellationToken.ThrowIfCancellationRequested(); + this.NotifyItemRejected(item, RejectionReasons.QueueTimeout); + throw new QueueTimeoutException(this._queueTimeout); + default: + break; + } + + this.NotifyItemProcessing(item, this._counter.Increment()); + + return item; + } + + private TaskItem CreateTask(Priority priority) + { + var item = new TaskItem(priority); + + item.OnCompleted = () => + { + var count = this._counter.Decrement(); + + var 
processNext = count < this._counter.Limit; + + this.NotifyItemProcessed(item, count); + + if (processNext) + { + this._taskQueue.Dequeue()?.Process(); + } + }; + + return item; + } + + private void NotifyItemRejected(TaskItem item, string reason) + => this._events?.Rejected?.Raise(new ItemRejectedEventArgs( + item.Priority, + reason)); + + private void NotifyItemProcessed(TaskItem item, int count) + => this._events?.ItemProcessed?.Raise(new ItemProcessedEventArgs( + item.Priority, + item.ProcessingTime, + this.ConcurrencyLimit, + count)); + + private void NotifyItemProcessing(TaskItem item, int count) + => this._events?.ItemProcessing?.Raise(new ItemProcessingEventArgs( + item.Priority, + this.ConcurrencyLimit, + count)); + + private void NotifyConcurrencyLimitChanged() + => this._events?.ConcurrencyLimitChanged?.Raise(new LimitChangedEventArgs(this._counter.Limit)); + + private void NotifyItemDequeued(int count, TaskItem item) => this._events?.ItemDequeued?.Raise(new ItemDequeuedEventArgs( + item.Priority, + item.WaitingTime, + this._taskQueue.Limit, + count)); + + private void NotifyItemEnqueued(int count, TaskItem item) => this._events?.ItemEnqueued?.Raise(new ItemEnqueuedEventArgs( + item.Priority, + this._taskQueue.Limit, + count)); + + private void ProcessPendingTasks() + { + while (this._counter.AvailableCount > 0) + { + var item = this._taskQueue.Dequeue(); + + if (item == null) + { + break; + } + + item.Process(); + } + } + } +} diff --git a/src/Farfetch.LoadShedding/Tasks/TaskQueue.cs b/src/Farfetch.LoadShedding/Tasks/TaskQueue.cs new file mode 100644 index 0000000..2f113ce --- /dev/null +++ b/src/Farfetch.LoadShedding/Tasks/TaskQueue.cs @@ -0,0 +1,114 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Runtime.CompilerServices; + +[assembly: InternalsVisibleTo("Farfetch.LoadShedding.BenchmarkTests")] + +namespace Farfetch.LoadShedding.Tasks +{ + internal class TaskQueue + { + private readonly ConcurrentCounter _counter = new ConcurrentCounter(); + + private readonly IDictionary _queues = new SortedDictionary() + { + [Priority.Critical] = new TaskItemList(), + [Priority.Normal] = new TaskItemList(), + [Priority.NonCritical] = new TaskItemList(), + }; + + public TaskQueue(int limit) + { + this._counter.Limit = limit; + } + + public int Count => this._counter.Count; + + public int Limit + { + get => this._counter.Limit; + set => this._counter.Limit = value; + } + + public Action OnItemEnqueued { get; set; } + + public Action OnItemDequeued { get; set; } + + public void Enqueue(TaskItem item) + { + int count = this.EnqueueItem(item); + + if (count > this.Limit) + { + this.RejectLastItem(); + } + } + + public TaskItem Dequeue() + { + var nextQueueItem = this._queues + .FirstOrDefault(x => x.Value.HasItems) + .Value? + .Dequeue(); + + if (nextQueueItem != null) + { + this.DecrementCounter(nextQueueItem); + } + + return nextQueueItem; + } + + public void Remove(TaskItem item) + { + if (this._queues[item.Priority].Remove(item)) + { + this.DecrementCounter(item); + } + } + + internal void Clear() + { + foreach (var queue in this._queues) + { + queue.Value.Clear(); + } + } + + private int EnqueueItem(TaskItem item) + { + this._queues[item.Priority].Add(item); + + var count = this._counter.Increment(); + + this.OnItemEnqueued?.Invoke(count, item); + + return count; + } + + private void RejectLastItem() + { + var lastItem = this._queues + .LastOrDefault(x => x.Value.HasItems) + .Value? 
+ .DequeueLast(); + + if (lastItem == null) + { + return; + } + + this._counter.Decrement(); + + this.DecrementCounter(lastItem); + + lastItem.Reject(); + } + + private void DecrementCounter(TaskItem nextQueueItem) + { + this.OnItemDequeued?.Invoke(this._counter.Decrement(), nextQueueItem); + } + } +} diff --git a/src/Farfetch.LoadShedding/Tasks/TaskResult.cs b/src/Farfetch.LoadShedding/Tasks/TaskResult.cs new file mode 100644 index 0000000..b592b19 --- /dev/null +++ b/src/Farfetch.LoadShedding/Tasks/TaskResult.cs @@ -0,0 +1,12 @@ +namespace Farfetch.LoadShedding.Tasks +{ + internal enum TaskResult + { + Pending, + Waiting, + Processing, + Timeout, + Rejected, + Completed + } +} diff --git a/src/StyleCopAnalyzersDefault.ruleset b/src/StyleCopAnalyzersDefault.ruleset new file mode 100644 index 0000000..233fb45 --- /dev/null +++ b/src/StyleCopAnalyzersDefault.ruleset @@ -0,0 +1,217 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/stylecop.json b/src/stylecop.json new file mode 100644 index 0000000..0ac7974 --- /dev/null +++ b/src/stylecop.json @@ -0,0 +1,20 @@ +{ + "$schema": "https://raw.githubusercontent.com/DotNetAnalyzers/StyleCopAnalyzers/master/StyleCop.Analyzers/StyleCop.Analyzers/Settings/stylecop.schema.json", + "settings": { + "documentationRules": { + "documentInterfaces": false, + "documentExposedElements": true, + "documentInternalElements": false, + "documentPrivateElements": false, + "documentPrivateFields": false, + "xmlHeader": false + }, + "layoutRules": { + "newlineAtEndOfFile": "require" + }, + "orderingRules": { + "systemUsingDirectivesFirst": true, + "usingDirectivesPlacement": "outsideNamespace" + } + } +} diff --git a/tests/benchmark/Farfetch.LoadShedding.BenchmarkTests/AdaptativeLimiterBenchmarks.cs b/tests/benchmark/Farfetch.LoadShedding.BenchmarkTests/AdaptativeLimiterBenchmarks.cs new file mode 100644 index 0000000..5af211b --- /dev/null +++ b/tests/benchmark/Farfetch.LoadShedding.BenchmarkTests/AdaptativeLimiterBenchmarks.cs @@ -0,0 +1,44 @@ +using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Order; +using Farfetch.LoadShedding.AspNetCore.Middlewares; +using Farfetch.LoadShedding.AspNetCore.Resolvers; +using Farfetch.LoadShedding.Builders; +using Farfetch.LoadShedding.Limiters; +using Farfetch.LoadShedding.Tasks; +using Microsoft.AspNetCore.Http; + +namespace Farfetch.LoadShedding.BenchmarkTests +{ + [MemoryDiagnoser] + [ThreadingDiagnoser] + [Orderer(SummaryOrderPolicy.Declared)] + [MinColumn, MaxColumn] + [IterationCount(10)] + [RankColumn] + public class AdaptativeLimiterBenchmarks + { + private static readonly IPriorityResolver s_randomResolver = new CustomPriorityResolver(_ => Task.FromResult((Priority)Random.Shared.Next(3))); + + private static readonly HttpContext s_context = new DefaultHttpContext(); + + private static readonly IAdaptativeConcurrencyLimiter s_limiter = new AdaptativeLimiterBuilder() + .WithOptions(options => options.MaxConcurrencyLimit = int.MaxValue) + .Build(); + + private static readonly AdaptativeConcurrencyLimiterMiddleware s_middleware = new(context => Task.CompletedTask, s_limiter, null); + + private static readonly 
AdaptativeConcurrencyLimiterMiddleware s_middlewareWithRandomPriority = new(context => Task.CompletedTask, s_limiter, s_randomResolver); + + [Benchmark] + public async Task Limiter_Default() => await s_limiter.ExecuteAsync(() => Task.CompletedTask); + + [Benchmark] + public async Task Limiter_RandomPriority() => await s_limiter.ExecuteAsync((Priority)Random.Shared.Next(3), () => Task.CompletedTask); + + [Benchmark] + public async Task LimiterMiddleware_Default() => await s_middleware.InvokeAsync(s_context); + + [Benchmark] + public async Task LimiterMiddleware_RandomPriority() => await s_middlewareWithRandomPriority.InvokeAsync(s_context); + } +} diff --git a/tests/benchmark/Farfetch.LoadShedding.BenchmarkTests/Farfetch.LoadShedding.BenchmarkTests.csproj b/tests/benchmark/Farfetch.LoadShedding.BenchmarkTests/Farfetch.LoadShedding.BenchmarkTests.csproj new file mode 100644 index 0000000..ba5004c --- /dev/null +++ b/tests/benchmark/Farfetch.LoadShedding.BenchmarkTests/Farfetch.LoadShedding.BenchmarkTests.csproj @@ -0,0 +1,20 @@ + + + + Exe + net8.0 + enable + enable + + + + + + + + + + + + + diff --git a/tests/benchmark/Farfetch.LoadShedding.BenchmarkTests/Program.cs b/tests/benchmark/Farfetch.LoadShedding.BenchmarkTests/Program.cs new file mode 100644 index 0000000..8506737 --- /dev/null +++ b/tests/benchmark/Farfetch.LoadShedding.BenchmarkTests/Program.cs @@ -0,0 +1,5 @@ +using BenchmarkDotNet.Running; +using Farfetch.LoadShedding.BenchmarkTests; + +BenchmarkRunner.Run(); +BenchmarkRunner.Run(); diff --git a/tests/benchmark/Farfetch.LoadShedding.BenchmarkTests/TaskQueueBenchmarks.cs b/tests/benchmark/Farfetch.LoadShedding.BenchmarkTests/TaskQueueBenchmarks.cs new file mode 100644 index 0000000..75d62d7 --- /dev/null +++ b/tests/benchmark/Farfetch.LoadShedding.BenchmarkTests/TaskQueueBenchmarks.cs @@ -0,0 +1,59 @@ +using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Order; +using Farfetch.LoadShedding.Tasks; + +namespace Farfetch.LoadShedding.BenchmarkTests +{ + [MemoryDiagnoser] + [ThreadingDiagnoser] + [Orderer(SummaryOrderPolicy.Declared)] + [MinColumn, MaxColumn] + [IterationCount(10)] + [RankColumn] + public class TaskQueueBenchmarks + { + private readonly TaskQueue _queue = new TaskQueue(int.MaxValue); + + private readonly TaskQueue _emptyQueue = new TaskQueue(int.MaxValue); + + private readonly TaskQueue _limitedQueue = new TaskQueue(1000); + + [IterationSetup] + public void Initialize() + { + this._queue.Clear(); + this._emptyQueue.Clear(); + this._limitedQueue.Clear(); + + while (this._queue.Count < 1000) + { + this._queue.Enqueue(GetTaskRandomPriority()); + } + + while (this._limitedQueue.Count < 1000) + { + this._limitedQueue.Enqueue(GetTaskRandomPriority()); + } + } + + [Benchmark] + public void TaskQueueWith1000Items_EnqueueFixedPriority() => this._queue.Enqueue(new TaskItem(0)); + + [Benchmark] + public void TaskQueueEmpty_EnqueueRandomPriority() => this._emptyQueue.Enqueue(GetTaskRandomPriority()); + + [Benchmark] + public void TaskQueueWith1000Items_EnqueueRandomPriority() => this._queue.Enqueue(GetTaskRandomPriority()); + + [Benchmark] + public void TaskQueueWith1000Items_Dequeue() => this._queue.Dequeue(); + + [Benchmark] + public void TaskQueue_EnqueueNewItem_LimitReached() => this._limitedQueue.Enqueue(new TaskItem(0)); + + private static TaskItem GetTaskRandomPriority() + { + return new TaskItem((Priority)Random.Shared.Next(3)); + } + } +} diff --git a/tests/integration-tests/Farfetch.LoadShedding.IntegrationTests/Base/Controllers/WebApiTestController.cs 
b/tests/integration-tests/Farfetch.LoadShedding.IntegrationTests/Base/Controllers/WebApiTestController.cs new file mode 100644 index 0000000..7344a15 --- /dev/null +++ b/tests/integration-tests/Farfetch.LoadShedding.IntegrationTests/Base/Controllers/WebApiTestController.cs @@ -0,0 +1,31 @@ +using System.Diagnostics.CodeAnalysis; +using Farfetch.LoadShedding.AspNetCore.Attributes; +using Farfetch.LoadShedding.IntegrationTests.Base.Models; +using Microsoft.AspNetCore.Mvc; + +namespace Farfetch.LoadShedding.IntegrationTests.Base.Controllers +{ + [ApiController] + [Route("api")] + [ExcludeFromCodeCoverage] + public class WebApiTestController : ControllerBase + { + [HttpGet] + [Route("people")] + [EndpointPriority(Tasks.Priority.Critical)] + public async Task GetPeopleAsync() + { + await Task.Delay(500); + + return this.Ok(new[] + { + new Person + { + Id = 1, + Age = 18, + UserName = "john.doe" + } + }); + } + } +} diff --git a/tests/integration-tests/Farfetch.LoadShedding.IntegrationTests/Base/Models/Person.cs b/tests/integration-tests/Farfetch.LoadShedding.IntegrationTests/Base/Models/Person.cs new file mode 100644 index 0000000..4622820 --- /dev/null +++ b/tests/integration-tests/Farfetch.LoadShedding.IntegrationTests/Base/Models/Person.cs @@ -0,0 +1,14 @@ +using System.Diagnostics.CodeAnalysis; + +namespace Farfetch.LoadShedding.IntegrationTests.Base.Models +{ + [ExcludeFromCodeCoverage] + public class Person + { + public int Id { get; set; } + + public string? UserName { get; set; } + + public int Age { get; set; } + } +} diff --git a/tests/integration-tests/Farfetch.LoadShedding.IntegrationTests/Farfetch.LoadShedding.IntegrationTests.csproj b/tests/integration-tests/Farfetch.LoadShedding.IntegrationTests/Farfetch.LoadShedding.IntegrationTests.csproj new file mode 100644 index 0000000..e472ba4 --- /dev/null +++ b/tests/integration-tests/Farfetch.LoadShedding.IntegrationTests/Farfetch.LoadShedding.IntegrationTests.csproj @@ -0,0 +1,36 @@ + + + + net8.0 + enable + enable + + false + + + + + + + + + + + + + + runtime; build; native; contentfiles; analyzers; buildtransitive + all + + + runtime; build; native; contentfiles; analyzers; buildtransitive + all + + + + + + + + + diff --git a/tests/integration-tests/Farfetch.LoadShedding.IntegrationTests/Tests/Limiters/AdaptativeConcurrencyLimiterTests.cs b/tests/integration-tests/Farfetch.LoadShedding.IntegrationTests/Tests/Limiters/AdaptativeConcurrencyLimiterTests.cs new file mode 100644 index 0000000..5774987 --- /dev/null +++ b/tests/integration-tests/Farfetch.LoadShedding.IntegrationTests/Tests/Limiters/AdaptativeConcurrencyLimiterTests.cs @@ -0,0 +1,284 @@ +using System.Collections.Concurrent; +using System.Net; +using Farfetch.LoadShedding.AspNetCore.Options; +using Farfetch.LoadShedding.Configurations; +using Farfetch.LoadShedding.Prometheus; +using Farfetch.LoadShedding.Tasks; +using Microsoft.AspNetCore.Builder; +using Microsoft.AspNetCore.Hosting; +using Microsoft.AspNetCore.TestHost; +using Microsoft.Extensions.DependencyInjection; +using Prometheus; +using Xunit; + +namespace Farfetch.LoadShedding.IntegrationTests.Tests.Limiters +{ + public class AdaptativeConcurrencyLimiterTests + { + private readonly ConcurrentBag _queueLimits = new ConcurrentBag(); + private readonly ConcurrentBag _concurrencyLimits = new ConcurrentBag(); + private readonly ConcurrentBag _enqueuedItems = new ConcurrentBag(); + private readonly CollectorRegistry _collectorRegistry; + + private int _numberOfRejectedRequests; + + public 
AdaptativeConcurrencyLimiterTests() + { + this._numberOfRejectedRequests = 0; + this._collectorRegistry = new CollectorRegistry(); + } + + [Fact] + public async Task GetAsync_WithReducedLimitAndQueueSize_SomeRequestsAreRejected() + { + // Arrange + const int InitialConcurrencyLimit = 100, InitialQueueSize = 4, MinSuccessfulRequests = 44; + + var options = new ConcurrencyOptions + { + QueueTimeoutInMs = 2, + InitialConcurrencyLimit = InitialConcurrencyLimit, + InitialQueueSize = InitialQueueSize, + MinQueueSize = InitialQueueSize + }; + + var client = this.GetClient(options); + + // Act + var tasks = Enumerable + .Range(0, 160) + .Select(_ => Task.Run(() => client.GetAsync("/api/people"))); + + var results = await Task.WhenAll(tasks.ToArray()); + + // Assert + Assert.True(results.Count(x => x.IsSuccessStatusCode) >= MinSuccessfulRequests); + Assert.Contains(results, x => x.StatusCode == HttpStatusCode.ServiceUnavailable); + await AssertMetrics(client); + } + + [Fact] + public async Task GetAsync_WithHighLimitAndQueueSize_NoneRequestsIsRejected() + { + // Arrange + const int InitialConcurrencyLimit = 160, InitialQueueSize = 4, ExpectedSuccessfulRequests = 160, ExpectedRejectedRequests = 0; + + var options = new ConcurrencyOptions + { + InitialConcurrencyLimit = InitialConcurrencyLimit, + InitialQueueSize = InitialQueueSize, + MinQueueSize = InitialQueueSize + }; + + var client = this.GetClient(options); + + // Act + var tasks = Enumerable + .Range(0, 160) + .Select(_ => Task.Run(() => client.GetAsync("/api/people"))); + + var results = await Task.WhenAll(tasks.ToArray()); + + // Assert + Assert.Equal(ExpectedSuccessfulRequests, results.Count(x => x.IsSuccessStatusCode)); + Assert.Equal(ExpectedRejectedRequests, results.Count(x => x.StatusCode == HttpStatusCode.ServiceUnavailable)); + } + + [Theory] + [InlineData("critical", Priority.Critical)] + [InlineData("normal", Priority.Normal)] + [InlineData("noncritical", Priority.NonCritical)] + [InlineData("non-critical", Priority.NonCritical)] + [InlineData("other", Priority.Normal)] + public async Task GetAsync_WithHeaderPriority_ResolvePriorityFromHeaderValue(string headerValue, Priority priority) + { + // Arrange + var options = new ConcurrencyOptions + { + InitialConcurrencyLimit = 1, + MinConcurrencyLimit = 1, + MaxConcurrencyLimit = 2, + InitialQueueSize = int.MaxValue, + MinQueueSize = int.MaxValue + }; + + var client = this.GetClient(options, x => x.UseHeaderPriorityResolver()); + + client.DefaultRequestHeaders.Add("X-Priority", headerValue); + + // Act + var tasks = Enumerable + .Range(0, 20) + .Select(_ => Task.Run(() => client.GetAsync("/api/people"))); + + var results = await Task.WhenAll(tasks.ToArray()); + + // Assert + Assert.NotEmpty(_enqueuedItems); + Assert.True(_enqueuedItems.All(x => x == priority)); + } + + [Theory] + [InlineData("critical")] + [InlineData("normal")] + [InlineData("noncritical")] + [InlineData("non-critical")] + [InlineData("other")] + public async Task GetAsync_WithEndpointPriority_ResolveFromEndpointPriorityAttribute(string headerValue) + { + // Arrange + var options = new ConcurrencyOptions + { + InitialConcurrencyLimit = 1, + MinConcurrencyLimit = 1, + MaxConcurrencyLimit = 2, + InitialQueueSize = int.MaxValue, + MinQueueSize = int.MaxValue + }; + + var client = this.GetClient(options, x => x.UseEndpointPriorityResolver()); + + client.DefaultRequestHeaders.Add("X-Priority", headerValue); + + // Act + var tasks = Enumerable + .Range(0, 5) + .Select(_ => Task.Run(() => client.GetAsync("/api/people"))); + + 
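For context on the two resolution strategies these tests exercise (the X-Priority header and the EndpointPriority attribute), a custom resolver can also be supplied. A hedged sketch, assuming the delegate shape the benchmark file earlier in this diff passes to CustomPriorityResolver (a function from HttpContext to a Task of Priority); the /health rule is purely illustrative:

using System.Threading.Tasks;
using Farfetch.LoadShedding.AspNetCore.Resolvers;
using Farfetch.LoadShedding.Tasks;
using Microsoft.AspNetCore.Http;

internal static class PriorityResolverSketch
{
    // Sketch: treat health probes as shed-first traffic and everything else as normal.
    public static IPriorityResolver CreatePathBasedResolver()
    {
        return new CustomPriorityResolver(context =>
            Task.FromResult(context.Request.Path.StartsWithSegments("/health")
                ? Priority.NonCritical
                : Priority.Normal));
    }
}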
var results = await Task.WhenAll(tasks.ToArray()); + + // Assert + Assert.NotEmpty(_enqueuedItems); + Assert.True(_enqueuedItems.All(x => x == Priority.Critical)); + } + + [Fact] + public async Task GetAsync_WithListener_TheEventValuesAreCorrect() + { + // Arrange + const int InitialConcurrencyLimit = 10, InitialQueueSize = 10, MinQueueSize = 4; + + var options = new ConcurrencyOptions + { + InitialConcurrencyLimit = InitialConcurrencyLimit, + MinConcurrencyLimit = InitialConcurrencyLimit, + InitialQueueSize = InitialQueueSize, + MinQueueSize = MinQueueSize + }; + + var client = this.GetClient(options); + + // Act + var tasks = Enumerable + .Range(0, 80) + .Select(_ => client.GetAsync("/api/people")); + + var results = await Task.WhenAll(tasks.ToArray()); + + // Assert + Assert.Equal(_numberOfRejectedRequests, results.Count(x => x.StatusCode == HttpStatusCode.ServiceUnavailable)); + Assert.Contains(this._concurrencyLimits, x => x == InitialConcurrencyLimit); + Assert.Contains(this._queueLimits, x => x == InitialQueueSize); + } + + [Fact] + public async Task GetMetrics_WithDisableMetrics_ShouldNotExportDisableMetrics() + { + // Arrange + var client = this.GetClient( + new ConcurrencyOptions(), + metricOptionsDelegate: options => + { + options.QueueLimit.Enabled = false; + options.ConcurrencyLimit.Enabled = false; + options.RequestRejected.Enabled = false; + }); + + await client.GetAsync("/api/people"); + + // Act + var metrics = await client.GetAsync("/monitoring/metrics"); + + // Assert + Assert.Equal(HttpStatusCode.OK, metrics.StatusCode); + Assert.NotNull(metrics.Content); + Assert.Equal("text/plain", metrics.Content?.Headers?.ContentType?.MediaType); + + var content = metrics.Content?.ReadAsStringAsync().GetAwaiter().GetResult(); + + Assert.DoesNotContain("http_requests_concurrency_limit_total", content); + Assert.DoesNotContain("http_requests_queue_limit_total", content); + Assert.DoesNotContain("http_requests_rejected_total", content); + + Assert.Contains("http_requests_concurrency_items_total", content); + Assert.Contains("http_requests_task_processing_time_seconds", content); + } + + public HttpClient GetClient( + ConcurrencyOptions concurrencyoptions, + Action? optionsDelegate = null, + Action? 
metricOptionsDelegate = null) + { + var testServer = new TestServer( + new WebHostBuilder() + .ConfigureServices(services => + { + services.AddLoadShedding((provider, options) => + { + optionsDelegate?.Invoke(options.AdaptativeLimiter); + + options.AdaptativeLimiter.ConcurrencyOptions.MinConcurrencyLimit = concurrencyoptions.MinConcurrencyLimit; + options.AdaptativeLimiter.ConcurrencyOptions.MaxConcurrencyLimit = concurrencyoptions.MaxConcurrencyLimit; + options.AdaptativeLimiter.ConcurrencyOptions.InitialConcurrencyLimit = concurrencyoptions.InitialConcurrencyLimit; + options.AdaptativeLimiter.ConcurrencyOptions.Tolerance = concurrencyoptions.Tolerance; + options.AdaptativeLimiter.ConcurrencyOptions.InitialQueueSize = concurrencyoptions.InitialQueueSize; + options.AdaptativeLimiter.ConcurrencyOptions.MinQueueSize = concurrencyoptions.MinQueueSize; + + options.SubscribeEvents((p, events) => + { + events.QueueLimitChanged.Subscribe(args => this._queueLimits.Add(args.Limit)); + events.ConcurrencyLimitChanged.Subscribe(args => this._concurrencyLimits.Add(args.Limit)); + events.Rejected.Subscribe(args => Interlocked.Increment(ref this._numberOfRejectedRequests)); + events.ItemEnqueued.Subscribe(args => this._enqueuedItems.Add(args.Priority)); + }); + + options.AddMetrics(options => + { + options.Registry = this._collectorRegistry; + metricOptionsDelegate?.Invoke(options); + }); + }) + .AddControllers(); + }) + .Configure(x => x + .UseMetricServer("/monitoring/metrics", this._collectorRegistry) + .UseRouting() + .UseLoadShedding() + .UseEndpoints(endpoints => endpoints.MapControllers()))); + + return testServer.CreateClient(); + } + + private static async Task AssertMetrics(HttpClient client) + { + var metrics = await client.GetAsync("/monitoring/metrics"); + + // Assert + Assert.Equal(HttpStatusCode.OK, metrics.StatusCode); + Assert.NotNull(metrics.Content); + Assert.Equal("text/plain", metrics.Content?.Headers?.ContentType?.MediaType); + + var content = metrics.Content?.ReadAsStringAsync().GetAwaiter().GetResult(); + + Assert.Contains("http_requests_concurrency_items_total", content); + Assert.Contains("http_requests_concurrency_limit_total", content); + Assert.Contains("http_requests_task_processing_time_seconds", content); + Assert.Contains("http_requests_queue_items_total{method=\"GET\",priority=\"normal\"}", content); + Assert.Contains("http_requests_queue_limit_total", content); + Assert.Contains("http_requests_queue_time_seconds_sum{method=\"GET\",priority=\"normal\"}", content); + Assert.Contains("http_requests_queue_time_seconds_count{method=\"GET\",priority=\"normal\"}", content); + Assert.Contains("http_requests_queue_time_seconds_bucket{method=\"GET\",priority=\"normal\",le=\"0.0005\"}", content); + Assert.Contains("http_requests_rejected_total{method=\"GET\",priority=\"normal\",reason=\"max_queue_items\"}", content); + } + } +} diff --git a/tests/performance-tests/AdaptiveLimiterTests.jmx b/tests/performance-tests/AdaptiveLimiterTests.jmx new file mode 100644 index 0000000..5c436e4 --- /dev/null +++ b/tests/performance-tests/AdaptiveLimiterTests.jmx @@ -0,0 +1,429 @@ + + + + + + false + true + true + + + + Protocol + ${__P(Protocol,http)} + = + + + NumberOfRequestsPerMinute + ${__P(NumberOfRequestsPerMinute,300)} + = + + + TestDurationInSeconds + ${__P(TestDurationInSeconds,60)} + = + + + NumberOfThreads + ${__P(NumberOfThreads, 45)} + = + + + RampUpInSeconds + ${__P(RampUpInSeconds,15)} + = + + + Port + ${__P(Port,9025)} + = + + + ServerEndpointNoLimiter + 
${__P(ServerEndpointNoLimiter,localhost)} + = + + + ServerEndpoint + ${__P(ServerEndpoint,localhost)} + = + + + + + + + + + + + ${ServerEndpoint} + ${Port} + ${Protocol} + + + 6 + HttpClient4 + + + + + + + + + + continue + + false + -1 + + ${NumberOfThreads} + ${RampUpInSeconds} + true + ${TestDurationInSeconds} + + true + + + + + throughput + 10000.0 + 0.0 + + 1 + + + + + + + ${ServerEndpoint} + ${Port} + + + api/GetWeather + GET + true + false + true + false + + + + + + + + continue + + false + -1 + + ${NumberOfThreads} + ${RampUpInSeconds} + true + ${TestDurationInSeconds} + + true + + + + + throughput + 10000.0 + 0.0 + + 1 + + + + + + + ${ServerEndpoint} + ${Port} + + + api/GetWeatherDelayed + GET + true + false + true + false + + + + + + + + continue + + false + -1 + + ${NumberOfThreads} + ${RampUpInSeconds} + true + ${TestDurationInSeconds} + + true + + + + + throughput + 10000.0 + 0.0 + + 1 + + + + + + + ${ServerEndpointNoLimiter} + ${Port} + + + api/GetWeather + GET + true + false + true + false + + + + + + + + continue + + false + -1 + + ${NumberOfThreads} + ${RampUpInSeconds} + true + ${TestDurationInSeconds} + + true + + + + + throughput + 10000.0 + 0.0 + + 1 + + + + + + + ${ServerEndpointNoLimiter} + ${Port} + + + api/GetWeatherDelayed + GET + true + false + true + false + + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + + + C:\Users\erik.catalao\Documents\Repository\framework-load-shedding\temp.jtl + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + + + + + + + + diff --git a/tests/performance-tests/Farfetch.LoadShedding.PerformanceTests/Controllers/WeatherForecastController.cs b/tests/performance-tests/Farfetch.LoadShedding.PerformanceTests/Controllers/WeatherForecastController.cs new file mode 100644 index 0000000..99f581b --- /dev/null +++ b/tests/performance-tests/Farfetch.LoadShedding.PerformanceTests/Controllers/WeatherForecastController.cs @@ -0,0 +1,41 @@ +using Farfetch.LoadShedding.PerformanceTests.Models; +using Microsoft.AspNetCore.Mvc; + +namespace Farfetch.LoadShedding.PerformanceTests.Controllers +{ + [ApiController] + [Route("api")] + public class WeatherForecastController : ControllerBase + { + private static readonly string[] s_summaries = new[] + { + "Freezing", "Bracing", "Chilly", "Cool", "Mild", "Warm", "Balmy", "Hot", "Sweltering", "Scorching" + }; + + [HttpGet("GetWeather")] + public IEnumerable Get() + { + return Enumerable.Range(1, 5).Select(index => new WeatherForecast + { + Date = DateTime.Now.AddDays(index), + TemperatureC = Random.Shared.Next(-20, 55), + Summary = 
s_summaries[Random.Shared.Next(s_summaries.Length)] + }) + .ToArray(); + } + + [HttpGet("GetWeatherDelayed")] + public async Task> GetDelayedAsync() + { + await Task.Delay(3000); + + return Enumerable.Range(1, 5).Select(index => new WeatherForecast + { + Date = DateTime.Now.AddDays(index), + TemperatureC = Random.Shared.Next(-20, 55), + Summary = s_summaries[Random.Shared.Next(s_summaries.Length)] + }) + .ToArray(); + } + } +} diff --git a/tests/performance-tests/Farfetch.LoadShedding.PerformanceTests/Farfetch.LoadShedding.PerformanceTests.csproj b/tests/performance-tests/Farfetch.LoadShedding.PerformanceTests/Farfetch.LoadShedding.PerformanceTests.csproj new file mode 100644 index 0000000..6fb0410 --- /dev/null +++ b/tests/performance-tests/Farfetch.LoadShedding.PerformanceTests/Farfetch.LoadShedding.PerformanceTests.csproj @@ -0,0 +1,20 @@ + + + + net8.0 + enable + enable + true + + + + + + + + + PreserveNewest + + + + diff --git a/tests/performance-tests/Farfetch.LoadShedding.PerformanceTests/Models/WeatherForecast.cs b/tests/performance-tests/Farfetch.LoadShedding.PerformanceTests/Models/WeatherForecast.cs new file mode 100644 index 0000000..fcf327b --- /dev/null +++ b/tests/performance-tests/Farfetch.LoadShedding.PerformanceTests/Models/WeatherForecast.cs @@ -0,0 +1,13 @@ +namespace Farfetch.LoadShedding.PerformanceTests.Models +{ + public class WeatherForecast + { + public DateTime Date { get; set; } + + public int TemperatureC { get; set; } + + public int TemperatureF => 32 + (int)(TemperatureC / 0.5556); + + public string? Summary { get; set; } + } +} diff --git a/tests/performance-tests/Farfetch.LoadShedding.PerformanceTests/Program.cs b/tests/performance-tests/Farfetch.LoadShedding.PerformanceTests/Program.cs new file mode 100644 index 0000000..21965e4 --- /dev/null +++ b/tests/performance-tests/Farfetch.LoadShedding.PerformanceTests/Program.cs @@ -0,0 +1,36 @@ +var builder = WebApplication.CreateBuilder(args); + +// Add services to the container. +builder.Services.AddControllers(); +builder.Services.AddLoadShedding((_, options) => +{ + options.AdaptativeLimiter.ConcurrencyOptions.QueueTimeoutInMs = 1000; + options.AdaptativeLimiter.ConcurrencyOptions.MinQueueSize = 1; + options.AdaptativeLimiter.ConcurrencyOptions.InitialQueueSize = 2; + options.AdaptativeLimiter.ConcurrencyOptions.MaxConcurrencyLimit = 10; + options.AdaptativeLimiter.ConcurrencyOptions.MinConcurrencyLimit = 1; + options.AdaptativeLimiter.ConcurrencyOptions.InitialConcurrencyLimit = 1; +}); + +builder.Configuration.AddEnvironmentVariables(); + +var app = builder.Build(); + +// Configure the HTTP request pipeline. 
+if (!app.Environment.IsDevelopment()) +{ + app.UseExceptionHandler("/Error"); +} + +if (app.Configuration.GetSection("UseLoadShedding").Get()) +{ + app.UseLoadShedding(); +} + +app.UseStaticFiles(); + +app.MapControllers(); + +app.UseRouting(); + +app.Run(); diff --git a/tests/performance-tests/Farfetch.LoadShedding.PerformanceTests/Properties/launchSettings.json b/tests/performance-tests/Farfetch.LoadShedding.PerformanceTests/Properties/launchSettings.json new file mode 100644 index 0000000..e264f22 --- /dev/null +++ b/tests/performance-tests/Farfetch.LoadShedding.PerformanceTests/Properties/launchSettings.json @@ -0,0 +1,13 @@ +{ + "profiles": { + "Farfetch.LoadShedding.PerformanceTests": { + "commandName": "Project", + "launchBrowser": true, + "environmentVariables": { + "ASPNETCORE_ENVIRONMENT": "Development" + }, + "dotnetRunMessages": true, + "applicationUrl": "http://localhost:9025" + } + } +} \ No newline at end of file diff --git a/tests/performance-tests/Farfetch.LoadShedding.PerformanceTests/appsettings.json b/tests/performance-tests/Farfetch.LoadShedding.PerformanceTests/appsettings.json new file mode 100644 index 0000000..04a8350 --- /dev/null +++ b/tests/performance-tests/Farfetch.LoadShedding.PerformanceTests/appsettings.json @@ -0,0 +1,10 @@ +{ + "Logging": { + "LogLevel": { + "Default": "Information", + "Microsoft.AspNetCore": "Warning" + } + }, + "AllowedHosts": "*", + "UseLoadShedding" : true +} diff --git a/tests/unit-tests/Farfetch.LoadShedding.AspNetCore.Tests/Configurators/LoadSheddingConfiguratorTests.cs b/tests/unit-tests/Farfetch.LoadShedding.AspNetCore.Tests/Configurators/LoadSheddingConfiguratorTests.cs new file mode 100644 index 0000000..dd257e2 --- /dev/null +++ b/tests/unit-tests/Farfetch.LoadShedding.AspNetCore.Tests/Configurators/LoadSheddingConfiguratorTests.cs @@ -0,0 +1,31 @@ +using Farfetch.LoadShedding.AspNetCore.Configurators; +using Microsoft.AspNetCore.Builder; +using Microsoft.Extensions.DependencyInjection; +using Xunit; + +namespace Farfetch.LoadShedding.AspNetCore.Tests.Configurators +{ + public class LoadSheddingConfiguratorTests + { + [Fact] + public void LoadSheddingConfigurator_UseAdaptativeLimiterWithOptions_RegisterOptionsWithCorrectValues() + { + // Arrange + var services = new ServiceCollection(); + + // Act + services.AddLoadShedding((provider, options) => + { + options.AdaptativeLimiter.ConcurrencyOptions.MinConcurrencyLimit = 1; + }); + + // Assert + var options = services + .BuildServiceProvider() + .GetService(); + + Assert.NotNull(options); + Assert.Equal(1, options.AdaptativeLimiter.ConcurrencyOptions.MinConcurrencyLimit); + } + } +} diff --git a/tests/unit-tests/Farfetch.LoadShedding.AspNetCore.Tests/Farfetch.LoadShedding.AspNetCore.Tests.csproj b/tests/unit-tests/Farfetch.LoadShedding.AspNetCore.Tests/Farfetch.LoadShedding.AspNetCore.Tests.csproj new file mode 100644 index 0000000..7056f6e --- /dev/null +++ b/tests/unit-tests/Farfetch.LoadShedding.AspNetCore.Tests/Farfetch.LoadShedding.AspNetCore.Tests.csproj @@ -0,0 +1,31 @@ + + + + net8.0 + enable + enable + + false + + + + + + + + + + runtime; build; native; contentfiles; analyzers; buildtransitive + all + + + runtime; build; native; contentfiles; analyzers; buildtransitive + all + + + + + + + + diff --git a/tests/unit-tests/Farfetch.LoadShedding.AspNetCore.Tests/Middlewares/AdaptativeConcurrencyLimiterMiddlewareTests.cs b/tests/unit-tests/Farfetch.LoadShedding.AspNetCore.Tests/Middlewares/AdaptativeConcurrencyLimiterMiddlewareTests.cs new file mode 100644 index 
0000000..bdca545 --- /dev/null +++ b/tests/unit-tests/Farfetch.LoadShedding.AspNetCore.Tests/Middlewares/AdaptativeConcurrencyLimiterMiddlewareTests.cs @@ -0,0 +1,120 @@ +using Farfetch.LoadShedding.AspNetCore.Middlewares; +using Farfetch.LoadShedding.AspNetCore.Options; +using Farfetch.LoadShedding.Exceptions; +using Farfetch.LoadShedding.Limiters; +using Farfetch.LoadShedding.Tasks; +using Microsoft.AspNetCore.Http; +using Moq; +using Xunit; + +namespace Farfetch.LoadShedding.AspNetCore.Tests.Middlewares +{ + public class AdaptativeConcurrencyLimiterMiddlewareTests + { + private readonly Mock _contextMock; + private readonly Mock _responseMock; + private readonly Mock _requestMock; + private readonly Mock _requestDelegateMock; + private readonly Mock _concurrencyLimiterMock; + private readonly AdaptativeLimiterOptions _options; + private readonly HeaderDictionary _headers; + + public AdaptativeConcurrencyLimiterMiddlewareTests() + { + this._contextMock = new Mock(); + this._responseMock = new Mock(); + this._requestMock = new Mock(); + this._requestDelegateMock = new Mock(); + this._concurrencyLimiterMock = new Mock(); + this._options = new AdaptativeLimiterOptions(); + + this._headers = new HeaderDictionary(); + + this._requestMock.Setup(x => x.Headers).Returns(() => this._headers); + + this._contextMock.SetupGet(x => x.Request).Returns(() => this._requestMock.Object); + this._contextMock.SetupGet(x => x.Response).Returns(() => this._responseMock.Object); + } + + [Theory] + [InlineData("critical", Priority.Critical)] + [InlineData("noncritical", Priority.NonCritical)] + [InlineData("non-critical", Priority.NonCritical)] + [InlineData("normal", Priority.Normal)] + [InlineData("other", Priority.Normal)] + public async Task InvokeAsync_WithHeaderPriority_ExecuteAsyncIsInvokedWithDefinedPriority(string headerValue, Priority priority) + { + // Arrange + const string headerName = "X-Priority-Test"; + + this._concurrencyLimiterMock + .Setup(x => x.ExecuteAsync(It.IsAny>(), It.IsAny())) + .Returns(() => Task.CompletedTask); + + this._requestDelegateMock + .Setup(x => x.Invoke(It.IsAny())) + .Returns(() => Task.CompletedTask); + + this._options.UseHeaderPriorityResolver(headerName); + + this._headers.Add(headerName, headerValue); + + var target = new AdaptativeConcurrencyLimiterMiddleware( + this._requestDelegateMock.Object, + this._concurrencyLimiterMock.Object, + this._options.PriorityResolver); + + // Act + await target.InvokeAsync(this._contextMock.Object); + + // Assert + this._concurrencyLimiterMock.Verify(x => x.ExecuteAsync(priority, It.IsAny>(), It.IsAny())); + this._responseMock.VerifySet(x => x.StatusCode = It.IsAny(), Times.Never); + } + + [Fact] + public async Task InvokeAsync_WithoutException_ExecuteAsyncIsInvokedWithDefaultPriority() + { + // Arrange + this._concurrencyLimiterMock + .Setup(x => x.ExecuteAsync(It.IsAny(), It.IsAny>(), It.IsAny())) + .Returns(() => Task.CompletedTask); + + this._requestDelegateMock + .Setup(x => x.Invoke(It.IsAny())) + .Returns(() => Task.CompletedTask); + + var target = new AdaptativeConcurrencyLimiterMiddleware( + this._requestDelegateMock.Object, + this._concurrencyLimiterMock.Object, + this._options.PriorityResolver); + + // Act + await target.InvokeAsync(this._contextMock.Object); + + // Assert + this._concurrencyLimiterMock.Verify(x => x.ExecuteAsync(Priority.Normal, It.IsAny>(), It.IsAny())); + this._responseMock.VerifySet(x => x.StatusCode = It.IsAny(), Times.Never); + } + + [Fact] + public async Task 
InvokeAsync_WithLimitReachedExceptionException_Returns() + { + // Arrange + this._concurrencyLimiterMock + .Setup(x => x.ExecuteAsync(It.IsAny(), It.IsAny>(), It.IsAny())) + .Throws(new LimitReachedException("Exception")); + + var target = new AdaptativeConcurrencyLimiterMiddleware( + this._requestDelegateMock.Object, + this._concurrencyLimiterMock.Object, + this._options.PriorityResolver); + + // Act + await target.InvokeAsync(this._contextMock.Object); + + // Assert + this._responseMock.VerifySet(x => x.StatusCode = StatusCodes.Status503ServiceUnavailable); + } + } +} diff --git a/tests/unit-tests/Farfetch.LoadShedding.AspNetCore.Tests/Resolvers/DefaultPriorityResolverTests.cs b/tests/unit-tests/Farfetch.LoadShedding.AspNetCore.Tests/Resolvers/DefaultPriorityResolverTests.cs new file mode 100644 index 0000000..8e8bd12 --- /dev/null +++ b/tests/unit-tests/Farfetch.LoadShedding.AspNetCore.Tests/Resolvers/DefaultPriorityResolverTests.cs @@ -0,0 +1,42 @@ +using Farfetch.LoadShedding.AspNetCore.Resolvers; +using Farfetch.LoadShedding.Tasks; +using Microsoft.AspNetCore.Http; +using Moq; +using Xunit; + +namespace Farfetch.LoadShedding.AspNetCore.Tests.Resolvers +{ + public class DefaultPriorityResolverTests + { + private readonly Mock _contextMock; + private readonly DefaultPriorityResolver _target; + + public DefaultPriorityResolverTests() + { + this._contextMock = new Mock(); + this._target = new DefaultPriorityResolver(); + } + + [Fact] + public async Task ResolveAsync_WithDefaultPriority_ReturnsNormal() + { + // Act + var result = await this._target.ResolveAsync(this._contextMock.Object); + + // Assert + Assert.Equal(Priority.Normal, result); + } + + [Fact] + public async Task ResolveAsync_WithDefaultPriorityMultipleTimes_AlwaysReturnNormal() + { + // Act + var tasks = Enumerable.Range(0, 10).Select(_ => this._target.ResolveAsync(this._contextMock.Object)); + + var results = await Task.WhenAll(tasks); + + // Assert + Assert.True(results.All(x => x == Priority.Normal)); + } + } +} diff --git a/tests/unit-tests/Farfetch.LoadShedding.AspNetCore.Tests/Resolvers/EndpointPriorityResolverTests.cs b/tests/unit-tests/Farfetch.LoadShedding.AspNetCore.Tests/Resolvers/EndpointPriorityResolverTests.cs new file mode 100644 index 0000000..d5860c1 --- /dev/null +++ b/tests/unit-tests/Farfetch.LoadShedding.AspNetCore.Tests/Resolvers/EndpointPriorityResolverTests.cs @@ -0,0 +1,60 @@ +using Farfetch.LoadShedding.AspNetCore.Attributes; +using Farfetch.LoadShedding.AspNetCore.Resolvers; +using Farfetch.LoadShedding.Tasks; +using Microsoft.AspNetCore.Http; +using Microsoft.AspNetCore.Http.Features; +using Moq; +using Xunit; + +namespace Farfetch.LoadShedding.AspNetCore.Tests.Resolvers +{ + public class EndpointPriorityResolverTests + { + private readonly Mock _contextMock; + private readonly Mock _endpointFeatureMock; + private readonly EndpointPriorityResolver _target; + + public EndpointPriorityResolverTests() + { + this._contextMock = new Mock(); + this._endpointFeatureMock = new Mock(); + this._target = new EndpointPriorityResolver(); + + var featuresMock = new Mock(); + + featuresMock.Setup(x => x.Get()).Returns(() => this._endpointFeatureMock.Object); + + this._contextMock.Setup(x => x.Features).Returns(() => featuresMock.Object); + } + + [Theory] + [InlineData(Priority.Critical)] + [InlineData(Priority.NonCritical)] + [InlineData(Priority.Normal)] + public async Task ResolveAsync_WithEndpointPriority_ReturnPriority(Priority priority) + { + // Arrange + var metadataCollection = new 
EndpointMetadataCollection(new EndpointPriorityAttribute(priority)); + + var endpoint = new Endpoint(null, metadataCollection, string.Empty); + + _endpointFeatureMock.Setup(x => x.Endpoint).Returns(endpoint); + + // Act + var result = await this._target.ResolveAsync(this._contextMock.Object); + + // Assert + Assert.Equal(priority, result); + } + + [Fact] + public async Task ResolveAsync_MissingEndpointAttribute_ReturnPriority() + { + // Act + var result = await this._target.ResolveAsync(this._contextMock.Object); + + // Assert + Assert.Equal(Priority.Normal, result); + } + } +} diff --git a/tests/unit-tests/Farfetch.LoadShedding.AspNetCore.Tests/Resolvers/HttpHeaderPriorityResolverTests.cs b/tests/unit-tests/Farfetch.LoadShedding.AspNetCore.Tests/Resolvers/HttpHeaderPriorityResolverTests.cs new file mode 100644 index 0000000..836efe2 --- /dev/null +++ b/tests/unit-tests/Farfetch.LoadShedding.AspNetCore.Tests/Resolvers/HttpHeaderPriorityResolverTests.cs @@ -0,0 +1,46 @@ +using Farfetch.LoadShedding.AspNetCore.Resolvers; +using Farfetch.LoadShedding.Tasks; +using Microsoft.AspNetCore.Http; +using Moq; +using Xunit; + +namespace Farfetch.LoadShedding.AspNetCore.Tests.Resolvers +{ + public class HttpHeaderPriorityResolverTests + { + private readonly Mock _contextMock; + private readonly HeaderDictionary _headers; + + public HttpHeaderPriorityResolverTests() + { + this._contextMock = new Mock(); + this._headers = new HeaderDictionary(); + + var requestMock = new Mock(); + + requestMock.Setup(x => x.Headers).Returns(() => this._headers); + + this._contextMock.SetupGet(x => x.Request).Returns(() => requestMock.Object); + } + + [Theory] + [InlineData("critical", Priority.Critical)] + [InlineData("noncritical", Priority.NonCritical)] + [InlineData("non-critical", Priority.NonCritical)] + [InlineData("normal", Priority.Normal)] + [InlineData("other", Priority.Normal)] + public async Task ResolveAsync_WithHeaderPriority_ReturnPriority(string headerValue, Priority priority) + { + // Arrange + var target = new HttpHeaderPriorityResolver(); + + this._headers[HttpHeaderPriorityResolver.DefaultPriorityHeaderName] = headerValue; + + // Act + var result = await target.ResolveAsync(this._contextMock.Object); + + // Assert + Assert.Equal(priority, result); + } + } +} diff --git a/tests/unit-tests/Farfetch.LoadShedding.Tests/Builders/AdaptativeLimiterBuilderTests.cs b/tests/unit-tests/Farfetch.LoadShedding.Tests/Builders/AdaptativeLimiterBuilderTests.cs new file mode 100644 index 0000000..cc47244 --- /dev/null +++ b/tests/unit-tests/Farfetch.LoadShedding.Tests/Builders/AdaptativeLimiterBuilderTests.cs @@ -0,0 +1,26 @@ +using Farfetch.LoadShedding.Builders; +using Xunit; + +namespace Farfetch.LoadShedding.Tests.Builders +{ + public class AdaptativeLimiterBuilderTests + { + [Fact] + public void Build_WithUseAdaptativeLimiter_LimiterShouldNotBeNull() + { + // Act + var target = new AdaptativeLimiterBuilder() + .WithOptions(options => + { + options.MinConcurrencyLimit = 20; + options.InitialConcurrencyLimit = 50; + options.InitialQueueSize = 50; + options.Tolerance = 2; + }) + .Build(); + + // Assert + Assert.NotNull(target); + } + } +} diff --git a/tests/unit-tests/Farfetch.LoadShedding.Tests/Calculators/GradientLimitCalculatorTests.cs b/tests/unit-tests/Farfetch.LoadShedding.Tests/Calculators/GradientLimitCalculatorTests.cs new file mode 100644 index 0000000..fdb3fa7 --- /dev/null +++ b/tests/unit-tests/Farfetch.LoadShedding.Tests/Calculators/GradientLimitCalculatorTests.cs @@ -0,0 +1,75 @@ +using 
Farfetch.LoadShedding.Calculators; +using Farfetch.LoadShedding.Configurations; +using Moq; +using Xunit; + +namespace Farfetch.LoadShedding.Tests.Calculators +{ + public class GradientLimitCalculatorTests + { + private readonly Mock _concurrencyContextMock; + + public GradientLimitCalculatorTests() + { + this._concurrencyContextMock = new Mock(); + + this._concurrencyContextMock.Setup(x => x.AvgRTT).Returns(50); + this._concurrencyContextMock.Setup(x => x.MinRTT).Returns(50); + } + + [Fact] + public void CalculateLimit_NumberOfExecutionsEqualsToZero_ReturnsTheCurrentLimit() + { + // Arrange + const int ExpectedResult = 0; + + var options = new ConcurrencyOptions(); + + var target = new GradientLimitCalculator(options); + + this._concurrencyContextMock.Setup(x => x.MaxConcurrencyLimit).Returns(ExpectedResult); + + // Act + var result = target.CalculateLimit(this._concurrencyContextMock.Object); + + // Assert + Assert.Equal(options.MinConcurrencyLimit, result); + } + + [Theory] + [InlineData(20, 1, 2, 0, 0.2, 20)] // When Calculated Gradient Is Less Than 0.5 + [InlineData(20, 1, 2, 0, 1.5, 20)] // When Gradient Is Greater Than 1 And Max Queue Size Greater Than Current Limit + [InlineData(20, 2, 2, 0, 1.5, 20)] // When Gradient Is Greater Than 1 And Max Queue Size Less Than Current Limit + [InlineData(10, 5, 1, 0, 0.8, 10)] // When Current Limit Is Greater Than New Limit And Min Limit Being The Max Value + [InlineData(5, 5, 1, 0, 0.8, 2)] // When Current Limit Is Greater Than New Limit And Smooth Limit Being The Max Value + [InlineData(5, 5, 1, 60, 0.8, 2)] // When Current Limit Is Greater Than New Limit And Previous Avg Rtt Is Greater Than Current Avg Rtt + public void CalculateLimit_WithMultipleInputs_ReturnsLimitCorrectly( + int expectedResult, + int maxConcurrencyLimit, + int currentQueueItems, + int previousAvgRTT, + double tolerance, + int minConcurrencyLimit) + { + // Arrange + this._concurrencyContextMock.Setup(x => x.AvgRTT).Returns(50); + this._concurrencyContextMock.Setup(x => x.MinRTT).Returns(50); + this._concurrencyContextMock.Setup(x => x.MaxConcurrencyLimit).Returns(maxConcurrencyLimit); + this._concurrencyContextMock.Setup(x => x.MaxQueueSize).Returns(maxConcurrencyLimit); + this._concurrencyContextMock.Setup(x => x.CurrentQueueCount).Returns(currentQueueItems); + this._concurrencyContextMock.Setup(x => x.PreviousAvgRTT).Returns(previousAvgRTT); + + var target = new GradientLimitCalculator(new ConcurrencyOptions + { + Tolerance = tolerance, + MinConcurrencyLimit = minConcurrencyLimit + }); + + // Act + var result = target.CalculateLimit(this._concurrencyContextMock.Object); + + // Assert + Assert.Equal(expectedResult, result); + } + } +} diff --git a/tests/unit-tests/Farfetch.LoadShedding.Tests/Calculators/SquareRootQueueCalculatorTests.cs b/tests/unit-tests/Farfetch.LoadShedding.Tests/Calculators/SquareRootQueueCalculatorTests.cs new file mode 100644 index 0000000..eed6220 --- /dev/null +++ b/tests/unit-tests/Farfetch.LoadShedding.Tests/Calculators/SquareRootQueueCalculatorTests.cs @@ -0,0 +1,33 @@ +using Farfetch.LoadShedding.Calculators; +using Farfetch.LoadShedding.Configurations; +using Moq; +using Xunit; + +namespace Farfetch.LoadShedding.Tests.Calculators +{ + public class SquareRootQueueCalculatorTests + { + [Theory] + [InlineData(10, 1, 10)] + [InlineData(10, 1028, 33)] + [InlineData(10, 20, 10)] + [InlineData(1, 20, 5)] + [InlineData(10, 121, 11)] + public void CalculateQueueSize_WithMultipleInputs_ReturnsTheQueueValueCorrectly(int minQueueSize, int 
maxConcurrencyLimit, int expectedResult) + { + // Arrange + var contextMock = new Mock(); + contextMock.Setup(x => x.MaxConcurrencyLimit).Returns(maxConcurrencyLimit); + + var options = new ConcurrencyOptions { MinQueueSize = minQueueSize }; + + var target = new SquareRootQueueCalculator(options); + + // Act + var result = target.CalculateQueueSize(contextMock.Object); + + // Assert + Assert.Equal(expectedResult, result); + } + } +} diff --git a/tests/unit-tests/Farfetch.LoadShedding.Tests/Configurations/ConcurrencyContextTests.cs b/tests/unit-tests/Farfetch.LoadShedding.Tests/Configurations/ConcurrencyContextTests.cs new file mode 100644 index 0000000..6475e48 --- /dev/null +++ b/tests/unit-tests/Farfetch.LoadShedding.Tests/Configurations/ConcurrencyContextTests.cs @@ -0,0 +1,37 @@ +using Farfetch.LoadShedding.Configurations; +using Farfetch.LoadShedding.Events; +using Farfetch.LoadShedding.Measures; +using Farfetch.LoadShedding.Tasks; +using Moq; +using Xunit; + +namespace Farfetch.LoadShedding.Tests.Configurations +{ + public class ConcurrencyContextTests + { + [Fact] + public void ConcurrencyContext_Initialization_ShouldSetTheCorrectValues() + { + // Arrange + const int MaxQueueSize = 20, MaxConcurrencyLimit = 10, CurrentQueueCount = 0; + + var events = new Mock(); + + var taskManager = new TaskManager( + MaxConcurrencyLimit, + MaxQueueSize, + Timeout.Infinite, + events.Object); + + var rtpMeasures = new RTTMeasures(2); + + // Act + var target = new ConcurrencyContext(taskManager, rtpMeasures); + + // Assert + Assert.Equal(MaxQueueSize, target.MaxQueueSize); + Assert.Equal(MaxConcurrencyLimit, target.MaxConcurrencyLimit); + Assert.Equal(CurrentQueueCount, target.CurrentQueueCount); + } + } +} diff --git a/tests/unit-tests/Farfetch.LoadShedding.Tests/Configurations/ConcurrencyOptionsTests.cs b/tests/unit-tests/Farfetch.LoadShedding.Tests/Configurations/ConcurrencyOptionsTests.cs new file mode 100644 index 0000000..cc9c1f0 --- /dev/null +++ b/tests/unit-tests/Farfetch.LoadShedding.Tests/Configurations/ConcurrencyOptionsTests.cs @@ -0,0 +1,79 @@ +using Farfetch.LoadShedding.Configurations; +using Farfetch.LoadShedding.Exceptions; +using Xunit; + +namespace Farfetch.LoadShedding.Tests.Configurations +{ + public class ConcurrencyOptionsTests + { + private const string ExpectedExceptionMessage = "The value of MinConcurrencyLimit, InitialConcurrencyLimit, MaxConcurrencyLimit, " + + "MinQueueSize, or InitialQueueSize should be greater than 0"; + + [Theory] + [InlineData(20, 50, 500, 1.5, 20, 50)] + [InlineData(1, 1, 2, 1.1, 1, 1)] + public void Validate_WithValidConfigurations_NoExceptionIsThrown( + int minConcurrencyLimit, + int initialConcurrencyLimit, + int maxConcurrencyLimit, + double tolerance, + int minQueueSize, + int initialQueueSize) + { + // Arrange + var target = new ConcurrencyOptions + { + MinConcurrencyLimit = minConcurrencyLimit, + InitialConcurrencyLimit = initialConcurrencyLimit, + MaxConcurrencyLimit = maxConcurrencyLimit, + Tolerance = tolerance, + MinQueueSize = minQueueSize, + InitialQueueSize = initialQueueSize, + }; + + // Act + var exception = Record.Exception(() => target.Validate()); + + // Assert + Assert.Null(exception); + } + + [Theory] + [InlineData(0, 50, 500, 1.5, 20, 50, ExpectedExceptionMessage)] + [InlineData(20, -5, 500, 1.5, 20, 50, ExpectedExceptionMessage)] + [InlineData(20, 50, 0, 1.5, 20, 50, ExpectedExceptionMessage)] + [InlineData(20, 50, 500, 1.5, 0, 50, ExpectedExceptionMessage)] + [InlineData(20, 50, 500, 1.5, 20, 0, ExpectedExceptionMessage)] + 
[InlineData(20, 50, 500, 1, 20, 50, "The value of Tolerance should be greater than 1")] + [InlineData(20, 50, 10, 1.5, 20, 50, "The value of MaxConcurrencyLimit should be greater than the MinConcurrencyLimit")] + [InlineData(20, 5, 500, 1.5, 20, 50, "The value of InitialConcurrencyLimit should be greater than MinConcurrencyLimit and less than MaxConcurrencyLimit")] + [InlineData(20, 600, 500, 1.5, 20, 50, "The value of InitialConcurrencyLimit should be greater than MinConcurrencyLimit and less than MaxConcurrencyLimit")] + [InlineData(20, 50, 500, 1.5, 20, 5, "The value of InitialQueueSize should be greater than the MinQueueSize")] + public void Validate_WithInvalidConfigurations_ExceptionIsThrown( + int minConcurrencyLimit, + int initialConcurrencyLimit, + int maxConcurrencyLimit, + double tolerance, + int minQueueSize, + int initialQueueSize, + string expectedMessage) + { + // Arrange + var target = new ConcurrencyOptions + { + MinConcurrencyLimit = minConcurrencyLimit, + InitialConcurrencyLimit = initialConcurrencyLimit, + MaxConcurrencyLimit = maxConcurrencyLimit, + Tolerance = tolerance, + MinQueueSize = minQueueSize, + InitialQueueSize = initialQueueSize, + }; + + // Act + var exception = Assert.Throws(() => target.Validate()); + + // Assert + Assert.Equal(expectedMessage, exception.Message); + } + } +} diff --git a/tests/unit-tests/Farfetch.LoadShedding.Tests/Farfetch.LoadShedding.Tests.csproj b/tests/unit-tests/Farfetch.LoadShedding.Tests/Farfetch.LoadShedding.Tests.csproj new file mode 100644 index 0000000..6e38a18 --- /dev/null +++ b/tests/unit-tests/Farfetch.LoadShedding.Tests/Farfetch.LoadShedding.Tests.csproj @@ -0,0 +1,29 @@ + + + + net8.0 + enable + enable + + false + + + + + + + + runtime; build; native; contentfiles; analyzers; buildtransitive + all + + + runtime; build; native; contentfiles; analyzers; buildtransitive + all + + + + + + + + diff --git a/tests/unit-tests/Farfetch.LoadShedding.Tests/Limiters/AdaptativeConcurrencyLimiterTests.cs b/tests/unit-tests/Farfetch.LoadShedding.Tests/Limiters/AdaptativeConcurrencyLimiterTests.cs new file mode 100644 index 0000000..cf7fc45 --- /dev/null +++ b/tests/unit-tests/Farfetch.LoadShedding.Tests/Limiters/AdaptativeConcurrencyLimiterTests.cs @@ -0,0 +1,30 @@ +using Farfetch.LoadShedding.Calculators; +using Farfetch.LoadShedding.Configurations; +using Farfetch.LoadShedding.Limiters; +using Farfetch.LoadShedding.Events; +using Moq; +using Xunit; + +namespace Farfetch.LoadShedding.Tests.Limiters +{ + public class AdaptativeConcurrencyLimiterTests + { + [Fact] + public async Task ExecuteAsync_WhenInvoked_TheFunctionIsCalled() + { + // Arrange + var wasInvoked = false; + var events = new LoadSheddingEvents(); + var limitCalculatorMock = new Mock(); + var queueSizeCalculatorMock = new Mock(); + + var target = new AdaptativeConcurrencyLimiter(new ConcurrencyOptions(), limitCalculatorMock.Object, queueSizeCalculatorMock.Object, events); + + // Act + await target.ExecuteAsync(() => { wasInvoked = true; return Task.CompletedTask; }); + + // Assert + Assert.True(wasInvoked); + } + } +} diff --git a/tests/unit-tests/Farfetch.LoadShedding.Tests/Measures/RTTMeasuresTests.cs b/tests/unit-tests/Farfetch.LoadShedding.Tests/Measures/RTTMeasuresTests.cs new file mode 100644 index 0000000..8d7b726 --- /dev/null +++ b/tests/unit-tests/Farfetch.LoadShedding.Tests/Measures/RTTMeasuresTests.cs @@ -0,0 +1,58 @@ +using Farfetch.LoadShedding.Measures; +using Xunit; + +namespace Farfetch.LoadShedding.Tests.Measures +{ + public class RTTMeasuresTests + { + 
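+        // AddSample should update TotalCount, MinRTT and AvgRTT from the recorded response times; RecoverFromLoad adjusts AvgRTT depending on the configured tolerance.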
[Fact] + public void AddSample_WithOneExecution_SetTheCorrectValues() + { + // Arrange + const long ExpectedTotalCount = 1, ExpectedAvgRTT = 50, ExpectedMinRtt = 50; + + var target = new RTTMeasures(tolerance: 2); + + // Act + target.AddSample(durationInMs: 50); + + // Assert + Assert.Equal(ExpectedTotalCount, target.TotalCount); + Assert.Equal(ExpectedMinRtt, target.MinRTT); + Assert.Equal(ExpectedAvgRTT, target.AvgRTT); + } + + [Fact] + public void AddSample_WithMultipleExecutions_SetTheCorrectValues() + { + // Arrange + const long ExpectedTotalCount = 5, ExpectedAvgRTT = 44, ExpectedMinRtt = 40; + + var target = new RTTMeasures(tolerance: 1.5); + + // Act + new double[] { 50, 30, 55, 60, 25 }.ToList().ForEach(x => target.AddSample(durationInMs: x)); + + // Assert + Assert.Equal(ExpectedTotalCount, target.TotalCount); + Assert.Equal(ExpectedMinRtt, target.MinRTT); + Assert.Equal(ExpectedAvgRTT, target.AvgRTT); + } + + [Theory] + [InlineData(1.5, 50)] + [InlineData(0.8, 45)] + public void RecoverFromLoad_WithDifferentTolerance_TheValuesAreOrNotSet(double tolerance, long expectedAvgRtt) + { + // Arrange + var target = new RTTMeasures(tolerance); + target.AddSample(durationInMs: 50); + + // Act + target.RecoverFromLoad(); + + // Assert + Assert.Equal(expectedAvgRtt, target.AvgRTT); + } + } +} diff --git a/tests/unit-tests/Farfetch.LoadShedding.Tests/Tasks/TaskItemTests.cs b/tests/unit-tests/Farfetch.LoadShedding.Tests/Tasks/TaskItemTests.cs new file mode 100644 index 0000000..e5856fb --- /dev/null +++ b/tests/unit-tests/Farfetch.LoadShedding.Tests/Tasks/TaskItemTests.cs @@ -0,0 +1,78 @@ +using Farfetch.LoadShedding.Tasks; +using Xunit; + +namespace Farfetch.LoadShedding.Tests.Tasks +{ + public class TaskItemTests + { + [Fact] + public async Task WaitAsync_Process_ReturnsProcessingStatus() + { + // Arrange + var taskItem = new TaskItem(0); + + var waitingTask = taskItem.WaitAsync(10000, CancellationToken.None); + + // Act + taskItem.Process(); + + await waitingTask; + + // Assert + Assert.Equal(TaskResult.Processing, taskItem.Status); + } + + [Fact] + public async Task WaitAsync_TimeoutReached_ReturnsCanceledStatus() + { + // Arrange + var taskItem = new TaskItem(0); + + var waitingTask = taskItem.WaitAsync(1, CancellationToken.None); + + // Act + await waitingTask; + + await Task.Delay(10); + + // Assert + Assert.Equal(TaskResult.Timeout, taskItem.Status); + } + + [Fact] + public async Task WaitAsync_CancelledToken_ReturnsCanceledStatus() + { + // Arrange + var taskItem = new TaskItem(0); + + using var source = new CancellationTokenSource(); + + // Act + var waitingTask = taskItem.WaitAsync(10000, source.Token); + + source.Cancel(); + + await waitingTask; + + // Assert + Assert.Equal(TaskResult.Timeout, taskItem.Status); + } + + [Fact] + public async Task WaitAsync_Reject_ReturnsProcessingStatus() + { + // Arrange + var taskItem = new TaskItem(0); + + var waitingTask = taskItem.WaitAsync(10000, CancellationToken.None); + + // Act + taskItem.Reject(); + + await waitingTask; + + // Assert + Assert.Equal(TaskResult.Rejected, taskItem.Status); + } + } +} diff --git a/tests/unit-tests/Farfetch.LoadShedding.Tests/Tasks/TaskManagerTests.cs b/tests/unit-tests/Farfetch.LoadShedding.Tests/Tasks/TaskManagerTests.cs new file mode 100644 index 0000000..9d347b6 --- /dev/null +++ b/tests/unit-tests/Farfetch.LoadShedding.Tests/Tasks/TaskManagerTests.cs @@ -0,0 +1,406 @@ +using Farfetch.LoadShedding.Events; +using Farfetch.LoadShedding.Events.Args; +using Farfetch.LoadShedding.Exceptions; +using 
Farfetch.LoadShedding.Tasks; +using Moq; +using Xunit; + +namespace Farfetch.LoadShedding.Tests.Tasks +{ + public class TaskManagerTests + { + private const Priority DefaultPriority = Priority.Normal; + + [Fact] + public void TaskManager_Initialize_TheValuesShouldBeDefined() + { + // Arrange + const int ExpectedTotalCount = 10, ExpectedQueueSize = 10; + + // Act + var target = new TaskManager(ExpectedTotalCount, ExpectedQueueSize); + + // Assert + Assert.Equal(ExpectedTotalCount, target.ConcurrencyLimit); + Assert.Equal(ExpectedQueueSize, target.QueueLimit); + Assert.Equal(0, target.ConcurrencyCount); + Assert.Equal(0, target.QueueCount); + } + + [Fact] + public void TaskManager_ChangeValues_TheValuesShouldBeCorrectlyDefined() + { + // Arrange + const int ExpectedTotalCount = 20, ExpectedQueueSize = 20; + + var target = new TaskManager(10, 10); + + // Act + target.ConcurrencyLimit = ExpectedTotalCount; + target.QueueLimit = ExpectedQueueSize; + + // Assert + Assert.Equal(ExpectedTotalCount, target.ConcurrencyLimit); + Assert.Equal(ExpectedQueueSize, target.QueueLimit); + Assert.Equal(0, target.ConcurrencyCount); + Assert.Equal(0, target.QueueCount); + } + + [Fact] + public void TaskManager_MaxCountBelow1_ExceptionIsThrown() + { + // Arrange + var target = new TaskManager(10, 10); + + const string ExpectedMessage = $"Must be greater than or equal to 1. (Parameter '{nameof(target.ConcurrencyLimit)}')"; + + // Act + var exception = Assert.Throws(() => target.ConcurrencyLimit = 0); + + // Assert + Assert.Equal(ExpectedMessage, exception.Message); + } + + [Fact] + public void TaskManager_WhenMaxCountIsChanged_NotifyConcurrencyLimitChangedIsCalled() + { + // Arrange + var expectedMaxCount = 20; + var currentLimit = 0; + var events = new LoadSheddingEvents(); + + events.ConcurrencyLimitChanged.Subscribe(args => currentLimit = args.Limit); + + var target = new TaskManager(10, 10, Timeout.Infinite, events); + + // Act + target.ConcurrencyLimit = expectedMaxCount; + + // Assert + Assert.Equal(expectedMaxCount, currentLimit); + } + + [Fact] + public async Task WaitAsync_UsingAllMaxCount_TheCurrentIsZeroEqualsToMaxCount() + { + // Arrange + const int MaxCount = 5; + + var target = new TaskManager(MaxCount, 5); + + // Act + for (var i = 0; i < MaxCount; i++) + { + await target.AcquireAsync(DefaultPriority); + } + + // Assert + Assert.Equal(MaxCount, target.ConcurrencyCount); + } + + [Fact] + public async Task WaitAsync_WhenTheMaxCountIs0_TheQueueIsIncreased() + { + // Arrange + const int MaxCount = 2, ExpectedQueueCount = 1; + + var target = new TaskManager(MaxCount, 2); + + // Act + for (var i = 0; i < MaxCount; i++) + { + await target.AcquireAsync(DefaultPriority); + } + + var waitTask = target.AcquireAsync(DefaultPriority); + + // Assert + Assert.Equal(MaxCount, target.ConcurrencyCount); + Assert.Equal(ExpectedQueueCount, target.QueueCount); + Assert.False(waitTask.IsCompleted); + } + + [Fact] + public async Task WaitAsync_WhenMaxCountIs0AndTheSemaphoreIsNotReleased_ExceptionIsThrown() + { + // Arrange + const int MaxCount = 2, ExpectedQueueCount = 0; + + var cancellationToken = new CancellationTokenSource(500); + + var target = new TaskManager(MaxCount, 2); + + for (var i = 0; i < MaxCount; i++) + { + await target.AcquireAsync(DefaultPriority); + } + + // Act + var exception = await Assert.ThrowsAsync(() => target.AcquireAsync(DefaultPriority,cancellationToken.Token)); + + // Assert + Assert.NotNull(exception); + Assert.Equal(MaxCount, target.ConcurrencyCount); + Assert.Equal(ExpectedQueueCount, 
target.QueueCount); + } + + [Fact] + public async Task WaitAsync_WhenAvailableConcurrencyIsChanged_NotifyConcurrentItemsCountChangedIsCalled() + { + // Arrange + var notifier = new Mock(); + + var itemProcessingEvent = new Mock>(); + + notifier.Setup(x => x.ItemProcessing).Returns(itemProcessingEvent.Object); + + var target = new TaskManager(10, 10, Timeout.Infinite, notifier.Object); + + // Act + await target.AcquireAsync(DefaultPriority); + + // Assert + itemProcessingEvent.Verify(x => x.Raise(It.IsAny()), Times.Once()); + } + + [Fact] + public async Task WaitAsync_WhenTheQueueLimitIsReached_ExceptionIsThrowns() + { + // Arrange + const string ExpectedMessage = "The maximum queue limit of 0 is reached."; + + var target = new TaskManager(0, 0); + + // Act + var result = await Assert.ThrowsAsync(() => target.AcquireAsync(DefaultPriority)); + + // Assert + Assert.Equal(ExpectedMessage, result.Message); + } + + [Fact] + public async Task WaitAsync_WhenTheQueueTimeoutIsReched_ExceptionIsThrowns() + { + // Arrange + const string ExpectedMessage = "The maximum queue timeout of 1 was reached."; + + var target = new TaskManager(0, 1, 1); + + // Act + var result = await Assert.ThrowsAsync(() => target.AcquireAsync(DefaultPriority)); + + // Assert + Assert.Equal(ExpectedMessage, result.Message); + } + + [Fact] + public async Task WaitAsync_WhenTheQueueLimitIsReached_AnEventIsRaised() + { + // Arrange + var rejectedItems = new List(); + + var events = new LoadSheddingEvents(); + + events.Rejected.Subscribe(args => rejectedItems.Add(args)); + + var target = new TaskManager(0, 0, Timeout.Infinite, events); + + // Act + await Assert.ThrowsAsync(() => target.AcquireAsync(DefaultPriority)); + + // Assert + Assert.Single(rejectedItems); + } + + [Fact] + public async Task Release_AllTheSemaphoreConcurrentRequestsAreReleased_TheCurrentCountReturnsToTheInitialValue() + { + // Arrange + const int ExpectedSemaphoreCurrentCount = 3; + + var target = new TaskManager(ExpectedSemaphoreCurrentCount, 5); + + var tasks = Enumerable.Range(0, ExpectedSemaphoreCurrentCount) + .Select(_ => target.AcquireAsync(DefaultPriority)); + + var items = await Task.WhenAll(tasks); + + // Act + foreach (var item in items) + { + item.Complete(); + } + + // Assert + Assert.Equal(0, target.ConcurrencyCount); + } + + [Fact] + public async Task Release_ConcurrencyItemsIsNotZero_NotifyConcurrentItemsCountChangedIsCalled() + { + // Arrange + var processedItems = new List(); + var processingItems = new List(); + + var events = new LoadSheddingEvents(); + + events.ItemProcessed.Subscribe(args => processedItems.Add(args)); + events.ItemProcessing.Subscribe(args => processingItems.Add(args)); + + var target = new TaskManager(10, 10, Timeout.Infinite, events); + + var item = await target.AcquireAsync(0); + + // Act + item.Complete(); + + // Assert + Assert.Single(processingItems); + Assert.Equal(item.Priority, processingItems.First().Priority); + Assert.Equal(1, processingItems.First().ConcurrencyCount); + Assert.Equal(10, processingItems.First().ConcurrencyLimit); + + Assert.Single(processedItems); + Assert.Equal(item.Priority, processedItems.First().Priority); + Assert.Equal(item.ProcessingTime, processedItems.First().ProcessingTime); + Assert.Equal(0, processedItems.First().ConcurrencyCount); + Assert.Equal(10, processedItems.First().ConcurrencyLimit); + } + + [Fact] + public async Task WaitAndRelease_ReleaseWhenRequestIsWaiting_TheQueueIsCleared() + { + // Arrange + const int MaxCount = 2, ExpectedQueueCount1 = 1, ExpectedQueueCount2 = 0; + 
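+            // Occupy both concurrency slots so the next AcquireAsync has to wait in the queue; completing one running item should then let the queued request through.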
+ var target = new TaskManager(MaxCount, 2); + + var tasks = Enumerable.Range(0, MaxCount).Select(_ => Task.Run(() => target.AcquireAsync(DefaultPriority))); + + var items = await Task.WhenAll(tasks); + + // Act + var semaphoreWaitTask = target.AcquireAsync(DefaultPriority); + + // Assert + Assert.Equal(MaxCount, target.ConcurrencyCount); + Assert.Equal(ExpectedQueueCount1, target.QueueCount); + Assert.False(semaphoreWaitTask.IsCompleted); + + // Act + items.First().Complete(); + + await Task.Delay(1); + + // Assert + Assert.Equal(MaxCount, target.ConcurrencyCount); + Assert.Equal(ExpectedQueueCount2, target.QueueCount); + Assert.True(semaphoreWaitTask.IsCompleted); + } + + [Fact] + public async Task Acquire_WithHigherPriorityTaskAndQueueIsFull_CancelLowPriorityTask() + { + // Arrange + var rejectedItems = new List(); + + var events = new LoadSheddingEvents(); + + events.Rejected.Subscribe(args => rejectedItems.Add(args)); + + var target = new TaskManager(1, 1, Timeout.Infinite, events); + + // Act + var item = await target.AcquireAsync(DefaultPriority); + var lowPriorityTask = target.AcquireAsync(Priority.NonCritical); + var highPriorityTask = target.AcquireAsync(Priority.Critical); + + item.Complete(); + + await Assert.ThrowsAsync(async () => await lowPriorityTask); + + await highPriorityTask; + + // Assert + Assert.True(highPriorityTask.IsCompleted); + Assert.False(highPriorityTask.IsFaulted); + Assert.False(highPriorityTask.IsCanceled); + Assert.True(lowPriorityTask.IsCompleted); + Assert.True(lowPriorityTask.IsFaulted); + Assert.False(lowPriorityTask.IsCanceled); + Assert.Equal(1, target.ConcurrencyCount); + Assert.Single(rejectedItems); + Assert.Equal(Priority.NonCritical, rejectedItems.First().Priority); + } + + [Fact] + public async Task Acquire_WithLowerPriorityTaskAndQueueIsFull_RejectTask() + { + // Arrange + var rejectedItems = new List(); + + var events = new LoadSheddingEvents(); + + var target = new TaskManager(1, 1, Timeout.Infinite, events); + + events.Rejected.Subscribe(args => rejectedItems.Add(args)); + + // Act + var item = await target.AcquireAsync(DefaultPriority); + var highPriorityTask = target.AcquireAsync(Priority.Critical); + var lowPriorityTask = target.AcquireAsync(Priority.Normal); + + item.Complete(); + + await Assert.ThrowsAsync(async () => await lowPriorityTask); + + await highPriorityTask; + + // Assert + Assert.True(highPriorityTask.IsCompleted); + Assert.False(highPriorityTask.IsFaulted); + Assert.False(highPriorityTask.IsCanceled); + Assert.True(lowPriorityTask.IsCompleted); + Assert.True(lowPriorityTask.IsFaulted); + Assert.False(lowPriorityTask.IsCanceled); + Assert.Equal(1, target.ConcurrencyCount); + Assert.Single(rejectedItems); + Assert.Equal(Priority.Normal, rejectedItems.First().Priority); + } + + [Fact] + public async Task Acquire_MultipleTasksEnqueued_ProcessAllTasksWithSuccess() + { + // Arrange + var enqueuedItemsCount = 0; + var dequeuedItemsCount = 0; + + var numberOfTasks = 10000; + + var lockObje = new object(); + + var events = new LoadSheddingEvents(); + + events.ItemEnqueued.Subscribe(_ => Interlocked.Increment(ref enqueuedItemsCount)); + events.ItemDequeued.Subscribe(_ => Interlocked.Increment(ref dequeuedItemsCount)); + + var target = new TaskManager(1, int.MaxValue, Timeout.Infinite, events); + + // Act + var tasks = Enumerable.Range(0, numberOfTasks).Select(_ => Task.Run(async () => + { + var item = await target.AcquireAsync((Priority)Random.Shared.Next(3)); + + item.Complete(); + + return true; + })); + + var result = await 
Task.WhenAll(tasks); + + // Assert + Assert.Equal(numberOfTasks, tasks.Count(t => t.Result)); + Assert.Equal(enqueuedItemsCount, dequeuedItemsCount); + } + } +} diff --git a/tests/unit-tests/Farfetch.LoadShedding.Tests/Tasks/TaskQueueTests.cs b/tests/unit-tests/Farfetch.LoadShedding.Tests/Tasks/TaskQueueTests.cs new file mode 100644 index 0000000..f4a5400 --- /dev/null +++ b/tests/unit-tests/Farfetch.LoadShedding.Tests/Tasks/TaskQueueTests.cs @@ -0,0 +1,68 @@ +using Farfetch.LoadShedding.Tasks; +using Xunit; + +namespace Farfetch.LoadShedding.Tests.Tasks +{ + public class TaskQueueTests + { + private readonly TaskQueue _target; + + public TaskQueueTests() + { + this._target = new TaskQueue(int.MaxValue); + } + + [Fact] + public void Enqueue_QueueLimitNotReached_AddToQueue() + { + // Arrange + var task = new TaskItem(0); + + // Act + this._target.Enqueue(task); + + // Assert + Assert.Equal(1, _target.Count); + Assert.Equal(TaskResult.Pending, task.Status); + } + + [Fact] + public void Enqueue_QueueLimitIsReached_RejectItem() + { + // Arrange + this._target.Limit = 1; + + var firstTask = new TaskItem(0); + var lastTask = new TaskItem(0); + + this._target.Enqueue(firstTask); + + // Act + this._target.Enqueue(lastTask); + + // Assert + Assert.Equal(1, _target.Count); + Assert.Equal(TaskResult.Rejected, lastTask.Status); + } + + [Fact] + public void Enqueue_TaskWithHigherPriorityQueueLimitIsReached_EnqueueItemAndRejectTheLastItemWithLowerPriority() + { + // Arrange + this._target.Limit = 1; + + var lowPriorityTask = new TaskItem(Priority.NonCritical); + this._target.Enqueue(lowPriorityTask); + + var highPriorityTask = new TaskItem(0); + + // Act + this._target.Enqueue(highPriorityTask); + + // Assert + Assert.Equal(1, _target.Count); + Assert.Equal(TaskResult.Pending, highPriorityTask.Status); + Assert.Equal(TaskResult.Rejected, lowPriorityTask.Status); + } + } +}