From e0b87852d519cf31cee961e9c07f7af7ace9dd4c Mon Sep 17 00:00:00 2001 From: Vitaliy Potyarkin Date: Thu, 1 Jan 1970 00:00:00 +0000 Subject: [PATCH 01/29] Empty default branch for forked repos Signed-off-by: Vitaliy Potyarkin --- README.md | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 0000000..7463f9e --- /dev/null +++ b/README.md @@ -0,0 +1,3 @@ +# WIP area: this repo is just a fork! + +Useful things may be published only in [other branches](../../../branches) From eca6765dda1df05d9dd7008c74fbdbf0e3bd0a38 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 20 Jan 2025 15:53:35 +0300 Subject: [PATCH 02/29] Initial commit Signed-off-by: Dmitrii Stepanov --- .forgejo/ISSUE_TEMPLATE/bug_report.md | 46 +++++ .forgejo/ISSUE_TEMPLATE/config.yml | 1 + .forgejo/ISSUE_TEMPLATE/feature_request.md | 28 +++ .forgejo/logo.svg | 70 +++++++ .forgejo/workflows/dco.yml | 21 +++ .forgejo/workflows/pre-commit.yml | 30 +++ .forgejo/workflows/tests.yml | 116 ++++++++++++ .forgejo/workflows/vulncheck.yml | 27 +++ .gitattributes | 2 + .gitignore | 22 +++ .golangci.yml | 75 ++++++++ .pre-commit-config.yaml | 38 ++++ LICENSE | 201 +++++++++++++++++++++ Makefile | 112 ++++++++++++ README.md | 22 +++ go.mod | 3 + 16 files changed, 814 insertions(+) create mode 100644 .forgejo/ISSUE_TEMPLATE/bug_report.md create mode 100644 .forgejo/ISSUE_TEMPLATE/config.yml create mode 100644 .forgejo/ISSUE_TEMPLATE/feature_request.md create mode 100644 .forgejo/logo.svg create mode 100644 .forgejo/workflows/dco.yml create mode 100644 .forgejo/workflows/pre-commit.yml create mode 100644 .forgejo/workflows/tests.yml create mode 100644 .forgejo/workflows/vulncheck.yml create mode 100644 .gitattributes create mode 100644 .gitignore create mode 100644 .golangci.yml create mode 100644 .pre-commit-config.yaml create mode 100644 LICENSE create mode 100755 Makefile create mode 100644 README.md create mode 100644 go.mod diff --git a/.forgejo/ISSUE_TEMPLATE/bug_report.md b/.forgejo/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..fb16999 --- /dev/null +++ b/.forgejo/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,46 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: community, triage, bug +assignees: '' + +--- + + + +## Expected Behavior + + +## Current Behavior + + +## Possible Solution + + +## Steps to Reproduce (for bugs) + + +1. + +## Context + + +## Regression + + +## Your Environment + +* Version used: +* Server setup and configuration: +* Operating System and version (`uname -a`): diff --git a/.forgejo/ISSUE_TEMPLATE/config.yml b/.forgejo/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..3ba13e0 --- /dev/null +++ b/.forgejo/ISSUE_TEMPLATE/config.yml @@ -0,0 +1 @@ +blank_issues_enabled: false diff --git a/.forgejo/ISSUE_TEMPLATE/feature_request.md b/.forgejo/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..5beeb06 --- /dev/null +++ b/.forgejo/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,28 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: community, triage +assignees: '' + +--- + +## Is your feature request related to a problem? Please describe. + + +## Describe the solution you'd like + + +## Describe alternatives you've considered + + +## Additional context + + +## Don't forget to add labels! +- component label (`neofs-adm`, `neofs-storage`, ...) +- issue type (`enhancement`, `refactor`, ...) 
+- `goodfirstissue`, `helpwanted` if needed
+- does this issue belong to an epic?
+- priority (`P0`-`P4`) if already triaged
+- quarter label (`202XQY`) if possible

diff --git a/.forgejo/logo.svg b/.forgejo/logo.svg
new file mode 100644
index 0000000..148c359
--- /dev/null
+++ b/.forgejo/logo.svg
@@ -0,0 +1,70 @@
+<!-- SVG logo markup (70 lines) omitted -->

diff --git a/.forgejo/workflows/dco.yml b/.forgejo/workflows/dco.yml
new file mode 100644
index 0000000..7c5af84
--- /dev/null
+++ b/.forgejo/workflows/dco.yml
@@ -0,0 +1,21 @@
+name: DCO action
+on: [pull_request]
+
+jobs:
+  dco:
+    name: DCO
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Setup Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: '1.22'
+
+      - name: Run commit format checker
+        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
+        with:
+          from: 'origin/${{ github.event.pull_request.base.ref }}'

diff --git a/.forgejo/workflows/pre-commit.yml b/.forgejo/workflows/pre-commit.yml
new file mode 100644
index 0000000..b27e7a3
--- /dev/null
+++ b/.forgejo/workflows/pre-commit.yml
@@ -0,0 +1,30 @@
+name: Pre-commit hooks
+
+on:
+  pull_request:
+  push:
+    branches:
+      - master
+
+jobs:
+  precommit:
+    name: Pre-commit
+    env:
+      # Skip pre-commit hooks which are executed by other actions.
+      SKIP: make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt
+    runs-on: ubuntu-22.04
+    # If we use actions/setup-python from either GitHub or Gitea,
+    # the job fails with a cryptic error about not being able to find python.
+    # So install everything manually.
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: 1.23
+      - name: Set up Python
+        run: |
+          apt update
+          apt install -y pre-commit
+      - name: Run pre-commit
+        run: pre-commit run --color=always --hook-stage manual --all-files

diff --git a/.forgejo/workflows/tests.yml b/.forgejo/workflows/tests.yml
new file mode 100644
index 0000000..4f1bebe
--- /dev/null
+++ b/.forgejo/workflows/tests.yml
@@ -0,0 +1,116 @@
+name: Tests and linters
+
+on:
+  pull_request:
+  push:
+    branches:
+      - master
+
+jobs:
+  lint:
+    name: Lint
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Set up Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: '1.23'
+          cache: true
+
+      - name: Install linters
+        run: make lint-install
+
+      - name: Run linters
+        run: make lint
+
+  tests:
+    name: Tests
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        go_versions: [ '1.22', '1.23' ]
+      fail-fast: false
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Set up Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: '${{ matrix.go_versions }}'
+          cache: true
+
+      - name: Run tests
+        run: make test
+
+  tests-race:
+    name: Tests with -race
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Set up Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: '1.22'
+          cache: true
+
+      - name: Run tests
+        run: go test ./...
-count=1 -race + + staticcheck: + name: Staticcheck + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Set up Go + uses: actions/setup-go@v3 + with: + go-version: '1.23' + cache: true + + - name: Install staticcheck + run: make staticcheck-install + + - name: Run staticcheck + run: make staticcheck-run + + gopls: + name: gopls check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Set up Go + uses: actions/setup-go@v3 + with: + go-version: '1.22' + cache: true + + - name: Install gopls + run: make gopls-install + + - name: Run gopls + run: make gopls-run + + fumpt: + name: Run gofumpt + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Set up Go + uses: actions/setup-go@v3 + with: + go-version: '1.23' + cache: true + + - name: Install gofumpt + run: make fumpt-install + + - name: Run gofumpt + run: | + make fumpt + git diff --exit-code --quiet diff --git a/.forgejo/workflows/vulncheck.yml b/.forgejo/workflows/vulncheck.yml new file mode 100644 index 0000000..cf15005 --- /dev/null +++ b/.forgejo/workflows/vulncheck.yml @@ -0,0 +1,27 @@ +name: Vulncheck + +on: + pull_request: + push: + branches: + - master + +jobs: + vulncheck: + name: Vulncheck + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Setup Go + uses: actions/setup-go@v3 + with: + go-version: '1.23' + + - name: Install govulncheck + run: go install golang.org/x/vuln/cmd/govulncheck@latest + + - name: Run govulncheck + run: govulncheck ./... diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..c7a3f7a --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +/**/*.pb.go -diff -merge +/**/*.pb.go linguist-generated=true diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..bfdd2f7 --- /dev/null +++ b/.gitignore @@ -0,0 +1,22 @@ +# IDE +.idea +.vscode + +# Vendoring +vendor + +# tempfiles +.DS_Store +*~ +.cache + +temp +tmp + +# binary +bin/ +release/ + +# coverage +coverage.txt +coverage.html diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000..b0499c7 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,75 @@ +# This file contains all available configuration options +# with their default values. + +# options for analysis running +run: + # timeout for analysis, e.g. 
30s, 5m, default is 1m + timeout: 5m + + # include test files or not, default is true + tests: false + +# output configuration options +output: + # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" + formats: + - format: tab + +# all available settings of specific linters +linters-settings: + exhaustive: + # indicates that switch statements are to be considered exhaustive if a + # 'default' case is present, even if all enum members aren't listed in the + # switch + default-signifies-exhaustive: true + funlen: + lines: 60 # default 60 + statements: 40 # default 40 + gocognit: + min-complexity: 30 # default 30 + importas: + no-unaliased: true + no-extra-aliases: false + unused: + field-writes-are-uses: false + exported-fields-are-used: false + local-variables-are-used: false + +linters: + enable: + # mandatory linters + - govet + - revive + + # some default golangci-lint linters + - errcheck + - gosimple + - godot + - ineffassign + - staticcheck + - typecheck + - unused + + # extra linters + - bidichk + - durationcheck + - exhaustive + - copyloopvar + - gofmt + - goimports + - misspell + - predeclared + - reassign + - whitespace + - containedctx + - funlen + - gocognit + - contextcheck + - importas + - perfsprint + - testifylint + - protogetter + - intrange + - tenv + disable-all: true + fast: false diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..946e000 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,38 @@ +ci: + autofix_prs: false + +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: check-added-large-files + - id: check-case-conflict + - id: check-executables-have-shebangs + - id: check-shebang-scripts-are-executable + - id: check-merge-conflict + - id: check-json + - id: check-xml + - id: check-yaml + - id: trailing-whitespace + args: [--markdown-linebreak-ext=md] + - id: end-of-file-fixer + exclude: ".key$" + + - repo: https://github.com/shellcheck-py/shellcheck-py + rev: v0.9.0.2 + hooks: + - id: shellcheck + + - repo: https://github.com/golangci/golangci-lint + rev: v1.59.1 + hooks: + - id: golangci-lint + + - repo: local + hooks: + - id: go-unit-tests + name: go unit tests + entry: make test GOFLAGS='' + pass_filenames: false + types: [go] + language: system diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..8dada3e --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Makefile b/Makefile new file mode 100755 index 0000000..c8f2da3 --- /dev/null +++ b/Makefile @@ -0,0 +1,112 @@ +#!/usr/bin/make -f +SHELL = bash + +BIN = bin +TMP_DIR := .cache + +SOURCES = $(shell find . 
-type f -name "*.go" -print) + +LINT_VERSION ?= 1.63.4 +LINT_DIR ?= $(abspath $(BIN))/golangci-lint +LINT_VERSION_DIR = $(LINT_DIR)/$(LINT_VERSION) + +STATICCHECK_VERSION ?= 2024.1.1 +STATICCHECK_DIR ?= $(abspath $(BIN))/staticcheck +STATICCHECK_VERSION_DIR ?= $(STATICCHECK_DIR)/$(STATICCHECK_VERSION) + +GOPLS_VERSION ?= v0.17.1 +GOPLS_DIR ?= $(abspath $(BIN))/gopls +GOPLS_VERSION_DIR ?= $(GOPLS_DIR)/$(GOPLS_VERSION) +GOPLS_TEMP_FILE := $(shell mktemp) + +GOFUMPT_VERSION ?= v0.7.0 +GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt +GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION) + +# Run all code formatters +fmts: fmt imports + +# Reformat code +fmt: + @echo "⇒ Processing gofmt check" + @gofumpt -s -w . + +# Reformat imports +imports: + @echo "⇒ Processing goimports check" + @goimports -w . + +# Run Unit Test with go test +test: GOFLAGS ?= "-count=1" +test: + @echo "⇒ Running go test" + @GOFLAGS="$(GOFLAGS)" go test ./... + +# Activate pre-commit hooks +pre-commit: + pre-commit install -t pre-commit -t commit-msg + +# Deactivate pre-commit hooks +unpre-commit: + pre-commit uninstall -t pre-commit -t commit-msg + +pre-commit-run: + @pre-commit run -a --hook-stage manual + +# Install linters +lint-install: + @rm -rf $(LINT_DIR) + @mkdir $(LINT_DIR) + @CGO_ENABLED=1 GOBIN=$(LINT_VERSION_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION) + +# Run linters +lint: + @if [ ! -d "$(LINT_VERSION_DIR)" ]; then \ + make lint-install; \ + fi + $(LINT_VERSION_DIR)/golangci-lint run + +# Install staticcheck +staticcheck-install: + @rm -rf $(STATICCHECK_DIR) + @mkdir $(STATICCHECK_DIR) + @GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION) + +# Run staticcheck +staticcheck-run: + @if [ ! -d "$(STATICCHECK_VERSION_DIR)" ]; then \ + make staticcheck-install; \ + fi + @$(STATICCHECK_VERSION_DIR)/staticcheck ./... + +# Install gopls +gopls-install: + @rm -rf $(GOPLS_DIR) + @mkdir $(GOPLS_DIR) + @GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION) + +# Run gopls +gopls-run: + @if [ ! -d "$(GOPLS_VERSION_DIR)" ]; then \ + make gopls-install; \ + fi + $(GOPLS_VERSION_DIR)/gopls check $(SOURCES) 2>&1 >$(GOPLS_TEMP_FILE) + @if [[ $$(wc -l < $(GOPLS_TEMP_FILE)) -ne 0 ]]; then \ + cat $(GOPLS_TEMP_FILE); \ + exit 1; \ + fi + rm $(GOPLS_TEMP_FILE) + +# Install gofumpt +fumpt-install: + @rm -rf $(GOFUMPT_DIR) + @mkdir $(GOFUMPT_DIR) + @GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION) + +# Run gofumpt +fumpt: + @if [ ! -d "$(GOFUMPT_VERSION_DIR)" ]; then \ + make fumpt-install; \ + fi + @echo "⇒ Processing gofumpt check" + $(GOFUMPT_VERSION_DIR)/gofumpt -l -w . diff --git a/README.md b/README.md new file mode 100644 index 0000000..36d5fb8 --- /dev/null +++ b/README.md @@ -0,0 +1,22 @@ +# Quality of Service (QoS) Go libraries for FrostFS object storage + +See package documentation +at [pkg.go.dev](https://pkg.go.dev/git.frostfs.info/TrueCloudLab/frostfs-qos) + +## License and copyright + +Copyright 2023-2025 FrostFS contributors + +``` + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +``` diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..00b2e04 --- /dev/null +++ b/go.mod @@ -0,0 +1,3 @@ +module git.frostfs.info/TrueCloudLab/frostfs-qos + +go 1.22 From 0dccab22c257a056a9618532ad676ba2c6b6efab Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 20 Jan 2025 17:10:24 +0300 Subject: [PATCH 03/29] [#1] mclock: Initial implementation Signed-off-by: Dmitrii Stepanov --- go.mod | 11 + go.sum | 12 + scheduling/mclock.go | 581 ++++++++++++++++++++++++++++++++ scheduling/mclock_bench.result | 172 ++++++++++ scheduling/mclock_bench_test.go | 87 +++++ scheduling/mclock_test.go | 459 +++++++++++++++++++++++++ 6 files changed, 1322 insertions(+) create mode 100644 go.sum create mode 100644 scheduling/mclock.go create mode 100644 scheduling/mclock_bench.result create mode 100644 scheduling/mclock_bench_test.go create mode 100644 scheduling/mclock_test.go diff --git a/go.mod b/go.mod index 00b2e04..f3e6160 100644 --- a/go.mod +++ b/go.mod @@ -1,3 +1,14 @@ module git.frostfs.info/TrueCloudLab/frostfs-qos go 1.22 + +require ( + github.com/stretchr/testify v1.9.0 + golang.org/x/sync v0.10.0 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..8c93871 --- /dev/null +++ b/go.sum @@ -0,0 +1,12 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/scheduling/mclock.go b/scheduling/mclock.go new file mode 100644 index 0000000..7c06bbc --- /dev/null +++ b/scheduling/mclock.go @@ -0,0 +1,581 @@ +package scheduling + +import ( + "container/heap" + "context" + "errors" + "math" + "sync" + "time" +) + +const ( + invalidIndex = -1 + undefinedReservation float64 = -1.0 +) + +var ( + ErrMClockSchedulerClosed = errors.New("mClock scheduler is closed") + ErrMClockSchedulerRequestLimitExceeded = errors.New("mClock scheduler request limit exceeded") + ErrMClockSchedulerUnknownTag = errors.New("unknown tag") + ErrInvalidTagInfo = errors.New("invalid tag info: shares, limit and reservation must be greater than zero") + ErrInvalidRunLimit = errors.New("invalid run limit: must be greater than zero") +) + +type queueItem interface { + ts() float64 + setIndex(idx int) +} + +type queue struct { + items []queueItem +} + +type request struct { + tag string + ts float64 + + reservation float64 + limit float64 + shares float64 + + 
reservationIdx int
+	limitIdx       int
+	sharesIdx      int
+	readyIdx       int
+
+	scheduled chan struct{}
+	canceled  chan struct{}
+}
+
+type clock interface {
+	now() float64
+	runAt(ts float64, f func())
+	close()
+}
+
+// ReleaseFunc is the type of function that should be called after the request is completed.
+type ReleaseFunc func()
+
+// TagInfo contains reserved IOPS, IOPS limit and share values for a tag.
+type TagInfo struct {
+	ReservedIOPS *float64
+	LimitIOPS    *float64
+	Share        float64
+}
+
+// MClock is an implementation of the mClock scheduling algorithm.
+//
+// See https://www.usenix.org/legacy/event/osdi10/tech/full_papers/Gulati.pdf for details.
+type MClock struct {
+	runLimit    uint64
+	waitLimit   int
+	clock       clock
+	idleTimeout float64
+	tagInfo     map[string]TagInfo
+
+	mtx              sync.Mutex
+	previous         map[string]*request
+	inProgress       uint64
+	lastSchedule     float64
+	reservationQueue *queue
+	limitQueue       *queue
+	sharesQueue      *queue
+	readyQueue       *queue
+	closed           bool
+}
+
+// NewMClock creates a new MClock scheduler instance with
+// runLimit as the maximum allowed count of running requests and
+// waitLimit as the maximum allowed count of waiting requests
+// for the tags specified by tagInfo. The value of idleTimeout defines
+// the difference, in seconds, between the current time and the time of
+// the previous request at which a tag is considered idle.
+// If idleTimeout is negative, idle tags are not allowed.
+// If waitLimit equals zero, there is no limit on the
+// number of waiting requests.
+func NewMClock(runLimit, waitLimit uint64, tagInfo map[string]TagInfo, idleTimeout float64) (*MClock, error) {
+	if err := validateParams(runLimit, tagInfo); err != nil {
+		return nil, err
+	}
+	result := &MClock{
+		runLimit:    runLimit,
+		waitLimit:   int(waitLimit),
+		clock:       newSystemClock(),
+		idleTimeout: idleTimeout,
+		tagInfo:     tagInfo,
+
+		reservationQueue: &queue{},
+		limitQueue:       &queue{},
+		sharesQueue:      &queue{},
+		readyQueue:       &queue{},
+	}
+
+	previous := make(map[string]*request)
+	for tag := range tagInfo {
+		previous[tag] = &request{
+			tag:            tag,
+			reservationIdx: invalidIndex,
+			limitIdx:       invalidIndex,
+			sharesIdx:      invalidIndex,
+		}
+	}
+	result.previous = previous
+
+	return result, nil
+}
+
+// RequestArrival schedules a new request with the given tag value.
+// The call blocks until one of the following events occurs:
+// the request is scheduled for execution,
+// the context ctx is canceled, or the scheduler is closed.
+// If the call returns a non-nil ReleaseFunc,
+// it must be called after the request is completed.
+func (q *MClock) RequestArrival(ctx context.Context, tag string) (ReleaseFunc, error) {
+	req, release, err := q.pushRequest(tag)
+	if err != nil {
+		return nil, err
+	}
+	select {
+	case <-ctx.Done():
+		q.dropRequest(req)
+		return nil, ctx.Err()
+	case <-req.scheduled:
+		return release, nil
+	case <-req.canceled:
+		return nil, ErrMClockSchedulerClosed
+	}
+}
+
+// Close closes the MClock scheduler.
+// No new requests for scheduling will be accepted after closing.
+func (q *MClock) Close() { + q.mtx.Lock() + defer q.mtx.Unlock() + + q.closed = true + q.clock.close() + for q.limitQueue.Len() > 0 { + item := heap.Pop(q.limitQueue).(*limitMQueueItem) + close(item.r.canceled) + q.removeFromQueues(item.r) + } +} + +func validateParams(runLimit uint64, tagInfo map[string]TagInfo) error { + if runLimit == 0 { + return ErrInvalidRunLimit + } + for _, v := range tagInfo { + if v.LimitIOPS != nil && (math.IsNaN(*v.LimitIOPS) || *v.LimitIOPS <= float64(0)) { + return ErrInvalidTagInfo + } + if v.ReservedIOPS != nil && (math.IsNaN(*v.ReservedIOPS) || *v.ReservedIOPS <= float64(0)) { + return ErrInvalidTagInfo + } + if math.IsNaN(v.Share) || v.Share <= float64(0) { + return ErrInvalidTagInfo + } + } + return nil +} + +func (q *MClock) dropRequest(req *request) { + q.mtx.Lock() + defer q.mtx.Unlock() + + select { + case <-req.scheduled: + if q.inProgress == 0 { + panic("invalid requests count") + } + q.inProgress-- + default: + } + + q.removeFromQueues(req) +} + +func (q *MClock) pushRequest(tag string) (*request, ReleaseFunc, error) { + q.mtx.Lock() + defer q.mtx.Unlock() + + if q.closed { + return nil, nil, ErrMClockSchedulerClosed + } + if q.waitLimit > 0 && q.sharesQueue.Len() == q.waitLimit { + return nil, nil, ErrMClockSchedulerRequestLimitExceeded + } + + now := q.clock.now() + tagInfo, ok := q.tagInfo[tag] + if !ok { + return nil, nil, ErrMClockSchedulerUnknownTag + } + prev, ok := q.previous[tag] + if !ok { + panic("undefined previous: " + tag) + } + + if q.idleTimeout >= 0 && now-prev.ts > q.idleTimeout { // was inactive for q.idleTimeout + q.adjustTags(now, tag) + } + + r := &request{ + tag: tag, + ts: now, + shares: max(prev.shares+1.0/tagInfo.Share, now), + reservationIdx: invalidIndex, + limitIdx: invalidIndex, + sharesIdx: invalidIndex, + readyIdx: invalidIndex, + scheduled: make(chan struct{}), + canceled: make(chan struct{}), + } + if tagInfo.ReservedIOPS != nil { + r.reservation = max(prev.reservation + 1.0 / *tagInfo.ReservedIOPS, now) + } else { + r.reservation = undefinedReservation + } + + if tagInfo.LimitIOPS != nil { + r.limit = max(prev.limit + 1.0 / *tagInfo.LimitIOPS, now) + } else { + r.limit = max(prev.limit, now) + } + + q.previous[tag] = r + if tagInfo.ReservedIOPS != nil { + heap.Push(q.reservationQueue, &reservationMQueueItem{r: r}) + } + heap.Push(q.sharesQueue, &sharesMQueueItem{r: r}) + heap.Push(q.limitQueue, &limitMQueueItem{r: r}) + q.scheduleRequest(true) + + return r, q.requestCompleted, nil +} + +func (q *MClock) adjustTags(now float64, idleTag string) { + if q.sharesQueue.Len() == 0 { + return + } + minShare := q.sharesQueue.items[0].ts() + for _, item := range q.limitQueue.items { // limitQueue has all requests and sharesQueue may be fixed + limitItem := item.(*limitMQueueItem) + if limitItem.r.tag == idleTag { + continue + } + limitItem.r.shares -= (minShare - now) + if limitItem.r.sharesIdx != invalidIndex { + heap.Fix(q.sharesQueue, limitItem.r.sharesIdx) + } + if limitItem.r.readyIdx != invalidIndex { + heap.Fix(q.readyQueue, limitItem.r.readyIdx) + } + } +} + +func (q *MClock) scheduleRequest(lockTaken bool) { + if !lockTaken { + q.mtx.Lock() + defer q.mtx.Unlock() + } + + if q.inProgress >= q.runLimit { + return + } + now := q.clock.now() + q.scheduleByReservation(now) + if q.inProgress >= q.runLimit { + return + } + q.scheduleByLimitAndWeight(now) + if q.inProgress >= q.runLimit || (q.reservationQueue.Len() == 0 && q.limitQueue.Len() == 0) { + return + } + q.setNextScheduleTimer(now) +} + +func (q *MClock) 
setNextScheduleTimer(now float64) { + nextTs := math.MaxFloat64 + if q.reservationQueue.Len() > 0 { + nextTs = q.reservationQueue.items[0].ts() + } + if q.limitQueue.Len() > 0 && q.limitQueue.items[0].ts() < nextTs { + nextTs = q.limitQueue.items[0].ts() + } + + if q.lastSchedule < now && q.lastSchedule > nextTs { + q.clock.runAt(nextTs, func() { + q.scheduleRequest(false) + }) + q.lastSchedule = nextTs + } +} + +func (q *MClock) scheduleByLimitAndWeight(now float64) { + for q.limitQueue.Len() > 0 && q.limitQueue.items[0].ts() <= now { + ready := heap.Pop(q.limitQueue).(*limitMQueueItem) + heap.Push(q.readyQueue, &readyMQueueItem{r: ready.r}) + } + + for q.inProgress < q.runLimit && q.readyQueue.Len() > 0 { + next := heap.Pop(q.readyQueue).(*readyMQueueItem) + hadReservation := false + if next.r.reservationIdx != invalidIndex { + hadReservation = true + heap.Remove(q.reservationQueue, next.r.reservationIdx) + } + q.removeFromQueues(next.r) + + tagInfo, ok := q.tagInfo[next.r.tag] + if !ok { + panic("unknown tag: " + next.r.tag) // must be checked on top level + } + if tagInfo.ReservedIOPS != nil && hadReservation { + var updated bool + for _, i := range q.reservationQueue.items { + ri := i.(*reservationMQueueItem) + if ri.r.tag == next.r.tag && ri.r.reservation > next.r.reservation { + ri.r.reservation -= 1.0 / *tagInfo.ReservedIOPS + updated = true + } + } + if updated { + heap.Init(q.reservationQueue) + } + } + + select { + case <-next.r.canceled: + continue + default: + } + + assertIndexInvalid(next.r) + q.inProgress++ + close(next.r.scheduled) + } +} + +func (q *MClock) scheduleByReservation(now float64) { + for q.inProgress < q.runLimit && q.reservationQueue.Len() > 0 && q.reservationQueue.items[0].ts() <= now { + next := heap.Pop(q.reservationQueue).(*reservationMQueueItem) + q.removeFromQueues(next.r) + + select { + case <-next.r.canceled: + continue + default: + } + + assertIndexInvalid(next.r) + q.inProgress++ + close(next.r.scheduled) + } +} + +func (q *MClock) removeFromQueues(r *request) { + if r.limitIdx != invalidIndex { + heap.Remove(q.limitQueue, r.limitIdx) + } + if r.sharesIdx != invalidIndex { + heap.Remove(q.sharesQueue, r.sharesIdx) + } + if r.readyIdx != invalidIndex { + heap.Remove(q.readyQueue, r.readyIdx) + } + if r.reservationIdx != invalidIndex { + heap.Remove(q.reservationQueue, r.reservationIdx) + } +} + +func (q *MClock) requestCompleted() { + q.mtx.Lock() + defer q.mtx.Unlock() + + if q.closed { + return + } + + if q.inProgress == 0 { + panic("invalid requests count") + } + q.inProgress-- + q.scheduleRequest(true) +} + +func assertIndexInvalid(r *request) { + if r.limitIdx != invalidIndex { + panic("limitIdx is not -1") + } + if r.sharesIdx != invalidIndex { + panic("sharesIdx is not -1") + } + if r.reservationIdx != invalidIndex { + panic("reservationIdx is not -1") + } + if r.readyIdx != invalidIndex { + panic("readyIdx is not -1") + } +} + +// Len implements heap.Interface. +func (q *queue) Len() int { + return len(q.items) +} + +// Less implements heap.Interface. +func (q *queue) Less(i int, j int) bool { + return q.items[i].ts() < q.items[j].ts() +} + +// Pop implements heap.Interface. +func (q *queue) Pop() any { + n := len(q.items) + item := q.items[n-1] + q.items[n-1] = nil + q.items = q.items[0 : n-1] + item.setIndex(invalidIndex) + return item +} + +// Push implements heap.Interface. +func (q *queue) Push(x any) { + it := x.(queueItem) + it.setIndex(q.Len()) + q.items = append(q.items, it) +} + +// Swap implements heap.Interface. 
+func (q *queue) Swap(i int, j int) { + q.items[i], q.items[j] = q.items[j], q.items[i] + q.items[i].setIndex(i) + q.items[j].setIndex(j) +} + +var _ queueItem = &reservationMQueueItem{} + +type reservationMQueueItem struct { + r *request +} + +func (i *reservationMQueueItem) ts() float64 { + return i.r.reservation +} + +func (i *reservationMQueueItem) setIndex(idx int) { + i.r.reservationIdx = idx +} + +var _ queueItem = &limitMQueueItem{} + +type limitMQueueItem struct { + r *request +} + +func (i *limitMQueueItem) ts() float64 { + return i.r.limit +} + +func (i *limitMQueueItem) setIndex(idx int) { + i.r.limitIdx = idx +} + +var _ queueItem = &sharesMQueueItem{} + +type sharesMQueueItem struct { + r *request +} + +func (i *sharesMQueueItem) ts() float64 { + return i.r.shares +} + +func (i *sharesMQueueItem) setIndex(idx int) { + i.r.sharesIdx = idx +} + +var _ queueItem = &readyMQueueItem{} + +type readyMQueueItem struct { + r *request +} + +func (i *readyMQueueItem) ts() float64 { + return i.r.shares +} + +func (i *readyMQueueItem) setIndex(idx int) { + i.r.readyIdx = idx +} + +type scheduleInfo struct { + ts float64 + f func() +} + +type systemClock struct { + since time.Time + schedule chan scheduleInfo + wg sync.WaitGroup +} + +func newSystemClock() *systemClock { + c := &systemClock{ + since: time.Now(), + schedule: make(chan scheduleInfo), + } + c.start() + return c +} + +func (c *systemClock) now() float64 { + return time.Since(c.since).Seconds() +} + +func (c *systemClock) runAt(ts float64, f func()) { + c.schedule <- scheduleInfo{ts: ts, f: f} +} + +func (c *systemClock) close() { + close(c.schedule) + c.wg.Wait() +} + +func (c *systemClock) start() { + c.wg.Add(1) + go func() { + defer c.wg.Done() + t := time.NewTimer(time.Hour) + var f func() + for { + select { + case <-t.C: + if f != nil { + f() + f = nil + } + t.Reset(time.Hour) + case s, ok := <-c.schedule: + if !ok { + return + } + now := c.now() + if now >= s.ts { + s.f() + f = nil + continue + } + if !t.Stop() { + select { + case <-t.C: + default: + } + } + t.Reset(time.Duration((s.ts - now) * 1e9)) + f = s.f + } + } + }() +} diff --git a/scheduling/mclock_bench.result b/scheduling/mclock_bench.result new file mode 100644 index 0000000..fa43dc4 --- /dev/null +++ b/scheduling/mclock_bench.result @@ -0,0 +1,172 @@ +Running tool: /usr/local/go/bin/go test -benchmem -run=^$ -tags integration -bench ^BenchmarkMClock$ git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling -count=1 + +goos: linux +goarch: amd64 +pkg: git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling +cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz +BenchmarkMClock/noop,_1_parallelism-8 8660 140071 ns/op 0 B/op 0 allocs/op +BenchmarkMClock/mclock,_no_limit,_no_reservation,_1_parallelism,_1_tags-8 8433 144946 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_no_reservation,_1_parallelism,_2_tags-8 8529 144497 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_no_reservation,_1_parallelism,_4_tags-8 7638 144267 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_no_reservation,_1_parallelism,_8_tags-8 8392 144710 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_no_reservation,_1_parallelism,_16_tags-8 8474 143977 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_1_parallelism,_1_tags-8 8374 143286 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_1_parallelism,_2_tags-8 7911 144500 ns/op 372 B/op 9 allocs/op 
+BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_1_parallelism,_4_tags-8 7332 144296 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_1_parallelism,_8_tags-8 7830 144359 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_1_parallelism,_16_tags-8 7839 145112 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_1_parallelism,_1_tags-8 7750 143561 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_1_parallelism,_2_tags-8 7840 143975 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_1_parallelism,_4_tags-8 7886 143822 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_1_parallelism,_8_tags-8 8251 144555 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_1_parallelism,_16_tags-8 7665 144781 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_1_parallelism,_1_tags-8 7881 145169 ns/op 371 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_1_parallelism,_2_tags-8 8386 143578 ns/op 369 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_1_parallelism,_4_tags-8 8274 143942 ns/op 368 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_1_parallelism,_8_tags-8 7830 143690 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_1_parallelism,_16_tags-8 7718 142707 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_1_parallelism,_1_tags-8 6446 180746 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_1_parallelism,_2_tags-8 6412 165058 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_1_parallelism,_4_tags-8 7323 156572 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_1_parallelism,_8_tags-8 8360 151004 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_1_parallelism,_16_tags-8 7712 147576 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_1_parallelism,_1_tags-8 6020 178971 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_1_parallelism,_2_tags-8 6448 165123 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_1_parallelism,_4_tags-8 6806 164651 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_1_parallelism,_8_tags-8 7284 152613 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_1_parallelism,_16_tags-8 7825 147727 ns/op 374 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_1_parallelism,_1_tags-8 5780 188006 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_1_parallelism,_2_tags-8 6483 172047 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_1_parallelism,_4_tags-8 7290 158680 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_1_parallelism,_8_tags-8 6862 148069 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_1_parallelism,_16_tags-8 7749 147112 ns/op 374 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_1_parallelism,_1_tags-8 5766 175459 ns/op 368 B/op 8 allocs/op 
+BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_1_parallelism,_2_tags-8 7200 161870 ns/op 368 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_1_parallelism,_4_tags-8 7300 152912 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_1_parallelism,_8_tags-8 7208 148916 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_1_parallelism,_16_tags-8 7648 146154 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/noop,_8_parallelism-8 8521 140329 ns/op 0 B/op 0 allocs/op +BenchmarkMClock/mclock,_no_limit,_no_reservation,_8_parallelism,_1_tags-8 7728 143902 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_no_reservation,_8_parallelism,_2_tags-8 8414 144178 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_no_reservation,_8_parallelism,_4_tags-8 8403 145010 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_no_reservation,_8_parallelism,_8_tags-8 8346 143279 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_no_reservation,_8_parallelism,_16_tags-8 7945 141189 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_8_parallelism,_1_tags-8 7820 141144 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_8_parallelism,_2_tags-8 8460 143132 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_8_parallelism,_4_tags-8 8343 144865 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_8_parallelism,_8_tags-8 8383 143854 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_8_parallelism,_16_tags-8 8379 144622 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_8_parallelism,_1_tags-8 7818 144074 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_8_parallelism,_2_tags-8 8511 145416 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_8_parallelism,_4_tags-8 8350 144417 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_8_parallelism,_8_tags-8 8364 144918 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_8_parallelism,_16_tags-8 7776 143588 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_8_parallelism,_1_tags-8 8356 144611 ns/op 371 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_8_parallelism,_2_tags-8 7828 142666 ns/op 370 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_8_parallelism,_4_tags-8 7870 142888 ns/op 368 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_8_parallelism,_8_tags-8 8528 140395 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_8_parallelism,_16_tags-8 8342 142833 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_8_parallelism,_1_tags-8 5640 187720 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_8_parallelism,_2_tags-8 6830 177689 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_8_parallelism,_4_tags-8 7209 156308 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_8_parallelism,_8_tags-8 7832 152150 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_8_parallelism,_16_tags-8 7520 145315 ns/op 366 B/op 8 allocs/op 
+BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_8_parallelism,_1_tags-8 5520 198036 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_8_parallelism,_2_tags-8 6430 171407 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_8_parallelism,_4_tags-8 7269 159044 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_8_parallelism,_8_tags-8 7760 147757 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_8_parallelism,_16_tags-8 7794 145792 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_8_parallelism,_1_tags-8 5510 199098 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_8_parallelism,_2_tags-8 7602 177956 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_8_parallelism,_4_tags-8 6955 160300 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_8_parallelism,_8_tags-8 7950 146992 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_8_parallelism,_16_tags-8 7870 145343 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_8_parallelism,_1_tags-8 6033 189148 ns/op 370 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_8_parallelism,_2_tags-8 6764 172016 ns/op 368 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_8_parallelism,_4_tags-8 7255 156037 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_8_parallelism,_8_tags-8 7879 150515 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_8_parallelism,_16_tags-8 7802 147904 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/noop,_32_parallelism-8 7870 139959 ns/op 0 B/op 0 allocs/op +BenchmarkMClock/mclock,_no_limit,_no_reservation,_32_parallelism,_1_tags-8 8146 141951 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_no_reservation,_32_parallelism,_2_tags-8 7737 143994 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_no_reservation,_32_parallelism,_4_tags-8 8444 143977 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_no_reservation,_32_parallelism,_8_tags-8 8367 142965 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_no_reservation,_32_parallelism,_16_tags-8 7807 142984 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_32_parallelism,_1_tags-8 8326 142276 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_32_parallelism,_2_tags-8 8164 142354 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_32_parallelism,_4_tags-8 8367 143149 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_32_parallelism,_8_tags-8 7864 143681 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_32_parallelism,_16_tags-8 7666 143557 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_32_parallelism,_1_tags-8 8354 142904 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_32_parallelism,_2_tags-8 8210 143932 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_32_parallelism,_4_tags-8 8328 143229 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_32_parallelism,_8_tags-8 8224 142964 ns/op 372 B/op 9 allocs/op 
+BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_32_parallelism,_16_tags-8 8398 142558 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_32_parallelism,_1_tags-8 7723 142118 ns/op 371 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_32_parallelism,_2_tags-8 8337 143492 ns/op 369 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_32_parallelism,_4_tags-8 7651 144301 ns/op 368 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_32_parallelism,_8_tags-8 8320 143327 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_32_parallelism,_16_tags-8 8214 143211 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_32_parallelism,_1_tags-8 6573 172171 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_32_parallelism,_2_tags-8 7261 158054 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_32_parallelism,_4_tags-8 7264 151381 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_32_parallelism,_8_tags-8 7887 149740 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_32_parallelism,_16_tags-8 7783 145891 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_32_parallelism,_1_tags-8 6448 184402 ns/op 374 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_32_parallelism,_2_tags-8 7142 170111 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_32_parallelism,_4_tags-8 6856 157931 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_32_parallelism,_8_tags-8 7332 147039 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_32_parallelism,_16_tags-8 8328 145941 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_32_parallelism,_1_tags-8 6741 170048 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_32_parallelism,_2_tags-8 6813 162057 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_32_parallelism,_4_tags-8 7334 152023 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_32_parallelism,_8_tags-8 7390 151674 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_32_parallelism,_16_tags-8 7299 144482 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_32_parallelism,_1_tags-8 6078 180087 ns/op 368 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_32_parallelism,_2_tags-8 6906 171037 ns/op 368 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_32_parallelism,_4_tags-8 7348 161815 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_32_parallelism,_8_tags-8 7352 150144 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_32_parallelism,_16_tags-8 8432 148060 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/noop,_64_parallelism-8 7905 139440 ns/op 0 B/op 0 allocs/op +BenchmarkMClock/mclock,_no_limit,_no_reservation,_64_parallelism,_1_tags-8 7698 143624 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_no_reservation,_64_parallelism,_2_tags-8 7994 142888 ns/op 364 B/op 8 allocs/op 
+BenchmarkMClock/mclock,_no_limit,_no_reservation,_64_parallelism,_4_tags-8 8451 142612 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_no_reservation,_64_parallelism,_8_tags-8 8332 141805 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_no_reservation,_64_parallelism,_16_tags-8 7700 144190 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_64_parallelism,_1_tags-8 8425 143468 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_64_parallelism,_2_tags-8 8294 143356 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_64_parallelism,_4_tags-8 7993 143701 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_64_parallelism,_8_tags-8 8104 142619 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_64_parallelism,_16_tags-8 7333 143398 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_64_parallelism,_1_tags-8 8396 143165 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_64_parallelism,_2_tags-8 8485 143813 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_64_parallelism,_4_tags-8 8193 144148 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_64_parallelism,_8_tags-8 7712 143123 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_64_parallelism,_16_tags-8 7663 144352 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_64_parallelism,_1_tags-8 7795 143937 ns/op 371 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_64_parallelism,_2_tags-8 8484 144034 ns/op 369 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_64_parallelism,_4_tags-8 7846 142858 ns/op 368 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_64_parallelism,_8_tags-8 8320 143052 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_64_parallelism,_16_tags-8 8484 142492 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_64_parallelism,_1_tags-8 5718 178028 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_64_parallelism,_2_tags-8 6993 160263 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_64_parallelism,_4_tags-8 6818 152746 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_64_parallelism,_8_tags-8 7684 149975 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_64_parallelism,_16_tags-8 7791 145647 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_64_parallelism,_1_tags-8 6402 190525 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_64_parallelism,_2_tags-8 6108 175412 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_64_parallelism,_4_tags-8 7340 159547 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_64_parallelism,_8_tags-8 7800 153072 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_64_parallelism,_16_tags-8 7863 146726 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_64_parallelism,_1_tags-8 5761 175532 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_64_parallelism,_2_tags-8 6433 
165923 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_64_parallelism,_4_tags-8 7309 153218 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_64_parallelism,_8_tags-8 7173 148557 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_64_parallelism,_16_tags-8 8353 146923 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_64_parallelism,_1_tags-8 6364 172028 ns/op 369 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_64_parallelism,_2_tags-8 7300 161579 ns/op 368 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_64_parallelism,_4_tags-8 6910 153875 ns/op 368 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_64_parallelism,_8_tags-8 7945 147313 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_64_parallelism,_16_tags-8 7848 146027 ns/op 366 B/op 8 allocs/op +PASS +ok git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling 192.364s diff --git a/scheduling/mclock_bench_test.go b/scheduling/mclock_bench_test.go new file mode 100644 index 0000000..9888a40 --- /dev/null +++ b/scheduling/mclock_bench_test.go @@ -0,0 +1,87 @@ +package scheduling + +import ( + "context" + "fmt" + "math" + "math/rand/v2" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +type noopMClockScheduler struct{} + +var ( + releaseStub ReleaseFunc = func() {} + defaultLimit float64 = 100_000 + shortReservation float64 = 1 + medReservation float64 = 100 + largeReservation float64 = 10_000 +) + +func (s *noopMClockScheduler) RequestArrival(context.Context, string) ReleaseFunc { + return releaseStub +} + +func BenchmarkMClock(b *testing.B) { + tagsCount := []int{1, 2, 4, 8, 16} + ioDuration := time.Millisecond + parallelismValues := []int{1, 8, 32, 64} + limits := []*float64{nil, &defaultLimit} + reservations := []*float64{nil, &shortReservation, &medReservation, &largeReservation} + for _, parallelism := range parallelismValues { + b.SetParallelism(parallelism) + + noopMClock := &noopMClockScheduler{} + b.Run(fmt.Sprintf("noop, %d parallelism", parallelism), func(b *testing.B) { + b.ResetTimer() + b.ReportAllocs() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + release := noopMClock.RequestArrival(context.Background(), "tag") + time.Sleep(ioDuration) + release() + } + }) + }) + + for _, limit := range limits { + for _, reservation := range reservations { + for _, tags := range tagsCount { + tagInfos := make(map[string]TagInfo) + for tag := 0; tag < tags; tag++ { + tagInfos["tag"+strconv.FormatInt(int64(tag), 10)] = TagInfo{Share: 50, LimitIOPS: limit, ReservedIOPS: reservation} + } + + mClockQ, _ := NewMClock(math.MaxUint64, math.MaxUint64, tagInfos, math.MaxFloat64) + + resStr := "no" + if reservation != nil { + resStr = strconv.FormatFloat(*reservation, 'f', 1, 64) + } + limitStr := "no" + if limit != nil { + limitStr = strconv.FormatFloat(*limit, 'f', 1, 64) + } + b.Run(fmt.Sprintf("mclock, %s limit, %s reservation, %d parallelism, %d tags", limitStr, resStr, parallelism, tags), func(b *testing.B) { + b.ResetTimer() + b.ReportAllocs() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + tag := rand.Int64N(int64(tags)) + release, err := mClockQ.RequestArrival(context.Background(), "tag"+strconv.FormatInt(int64(tag), 10)) + require.NoError(b, err) + time.Sleep(ioDuration) + release() + } + }) + }) + } + } + } + + } 
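+
+	// A rough way to read mclock_bench.result (an interpretation, not a claim
+	// made by the patch): the noop runs measure only the simulated 1ms I/O
+	// spread across the RunParallel workers (~140µs/op on the benchmark
+	// machine), so they are the floor for every mclock configuration; the gap
+	// between an mclock line and the noop line at the same parallelism
+	// approximates the scheduler's own queueing overhead.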
+}
diff --git a/scheduling/mclock_test.go b/scheduling/mclock_test.go
new file mode 100644
index 0000000..7f0edc8
--- /dev/null
+++ b/scheduling/mclock_test.go
@@ -0,0 +1,459 @@
+package scheduling
+
+import (
+	"context"
+	"math"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	"golang.org/x/sync/errgroup"
+)
+
+func TestMClockSharesScheduling(t *testing.T) {
+	t.Parallel()
+	reqCount := 1000
+	reqCount = (reqCount / 2) * 2
+	q, err := NewMClock(1, math.MaxUint64, map[string]TagInfo{
+		"class1": {Share: 2},
+		"class2": {Share: 1},
+	}, 100)
+	require.NoError(t, err)
+	q.clock = &noopClock{}
+
+	var releases []ReleaseFunc
+	var requests []*request
+	tag := "class1"
+	for i := 0; i < reqCount/2; i++ {
+		req, release, err := q.pushRequest(tag)
+		require.NoError(t, err)
+		requests = append(requests, req)
+		releases = append(releases, release)
+	}
+	tag = "class2"
+	for i := 0; i < reqCount/2; i++ {
+		req, release, err := q.pushRequest(tag)
+		require.NoError(t, err)
+		requests = append(requests, req)
+		releases = append(releases, release)
+	}
+
+	var result []string
+	var wg sync.WaitGroup
+	for i := 0; i < reqCount; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			<-requests[i].scheduled
+			result = append(result, requests[i].tag)
+			releases[i]()
+		}()
+	}
+	wg.Wait()
+
+	// Requests must be scheduled as class1->class1->class2->class1->class1->class2...,
+	// because the ratio is 2 to 1.
+	// However, there may be deviations due to rounding and sorting.
+	result = result[:reqCount/2+(reqCount/2)/2] // the last reqCount/4 requests are the class2 tail
+	var class1Count int
+	var class2Count int
+	var class2MaxSeq int
+	for _, res := range result {
+		switch res {
+		case "class1":
+			class1Count++
+			class2MaxSeq = 0
+		case "class2":
+			class2Count++
+			class2MaxSeq++
+			require.Less(t, class2MaxSeq, 3) // no more than 2 class2 requests are expected to be scheduled in a row
+		default:
+			require.Fail(t, "unknown tag")
+		}
+	}
+
+	require.True(t, (class1Count*100)/(class1Count+class2Count) == 66)
+}
+
+var _ clock = &noopClock{}
+
+type noopClock struct {
+	v float64
+}
+
+func (n *noopClock) now() float64 {
+	return n.v
+}
+
+func (n *noopClock) runAt(ts float64, f func()) {}
+
+func (n *noopClock) close() {}
+
+func TestMClockRequestCancel(t *testing.T) {
+	t.Parallel()
+	q, err := NewMClock(1, math.MaxUint64, map[string]TagInfo{
+		"class1": {Share: 2},
+		"class2": {Share: 1},
+	}, 100)
+	require.NoError(t, err)
+	q.clock = &noopClock{}
+
+	release1, err := q.RequestArrival(context.Background(), "class1")
+	require.NoError(t, err)
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
+	defer cancel()
+	release2, err := q.RequestArrival(ctx, "class1")
+	require.Nil(t, release2)
+	require.ErrorIs(t, err, context.DeadlineExceeded)
+
+	require.Equal(t, 0, q.readyQueue.Len())
+	require.Equal(t, 0, q.sharesQueue.Len())
+	require.Equal(t, 0, q.limitQueue.Len())
+	require.Equal(t, 0, q.reservationQueue.Len())
+
+	release1()
+}
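+
+// A minimal usage sketch of the public API these tests exercise (the tag
+// names and numeric values here are illustrative, not taken from the patch):
+//
+//	q, err := NewMClock(8, 1000, map[string]TagInfo{
+//		"interactive": {Share: 4},
+//		"background":  {Share: 1},
+//	}, 60.0)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer q.Close()
+//
+//	release, err := q.RequestArrival(context.Background(), "interactive")
+//	if err != nil {
+//		// ErrMClockSchedulerClosed or ErrMClockSchedulerRequestLimitExceeded
+//		return
+//	}
+//	defer release() // release the slot once the I/O completes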
+
+func TestMClockLimitScheduling(t *testing.T) {
+	t.Parallel()
+	reqCount := 100
+	reqCount = (reqCount / 2) * 2
+	limit := 1.0
+	cl := &noopClock{}
+	q, err := NewMClock(1, math.MaxUint64, map[string]TagInfo{
+		"class1": {Share: 2, LimitIOPS: &limit},
+		"class2": {Share: 1, LimitIOPS: &limit},
+	}, 100)
+	require.NoError(t, err)
+	q.clock = cl
+
+	var releases []ReleaseFunc
+	var requests []*request
+	tag := "class1"
+	for i := 0; i < reqCount/2; i++ {
+		req, release, err := q.pushRequest(tag)
+		require.NoError(t, err)
+		requests = append(requests, req)
+		releases = append(releases, release)
+	}
+	tag = "class2"
+	for i := 0; i < reqCount/2; i++ {
+		req, release, err := q.pushRequest(tag)
+		require.NoError(t, err)
+		requests = append(requests, req)
+		releases = append(releases, release)
+	}
+
+	q.scheduleRequest(false)
+
+	for _, req := range requests {
+		select {
+		case <-req.scheduled:
+			require.Fail(t, "no request must be scheduled because time is 0.0 while limit values are greater than 0.0")
+		default:
+		}
+	}
+
+	cl.v = math.MaxFloat64
+
+	var result []string
+	var wg sync.WaitGroup
+	for i := 0; i < reqCount; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			<-requests[i].scheduled
+			result = append(result, requests[i].tag)
+			releases[i]()
+		}()
+	}
+	q.scheduleRequest(false)
+	wg.Wait()
+
+	// Requests must be scheduled as class1->class1->class2->class1->class1->class2...,
+	// because the ratio is 2 to 1.
+	// However, there may be deviations due to rounding and sorting.
+	result = result[:reqCount/2+(reqCount/2)/2] // the last reqCount/4 requests are the class2 tail
+	var class1Count int
+	var class2Count int
+	var class2MaxSeq int
+	for _, res := range result {
+		switch res {
+		case "class1":
+			class1Count++
+			class2MaxSeq = 0
+		case "class2":
+			class2Count++
+			class2MaxSeq++
+			require.Less(t, class2MaxSeq, 3) // no more than 2 class2 requests are expected to be scheduled in a row
+		default:
+			require.Fail(t, "unknown tag")
+		}
+	}
+
+	require.True(t, (class1Count*100)/(class1Count+class2Count) == 66)
+
+	require.Equal(t, 0, q.readyQueue.Len())
+	require.Equal(t, 0, q.sharesQueue.Len())
+	require.Equal(t, 0, q.limitQueue.Len())
+	require.Equal(t, 0, q.reservationQueue.Len())
+}
+
+func TestMClockReservationScheduling(t *testing.T) {
+	t.Parallel()
+	reqCount := 1000
+	reqCount = (reqCount / 2) * 2
+	limit := 0.01        // 1 request per 100 seconds
+	reservation := 100.0 // 100 RPS
+	cl := &noopClock{}
+	q, err := NewMClock(uint64(reqCount), math.MaxUint64, map[string]TagInfo{
+		"class1": {Share: 2, LimitIOPS: &limit},
+		"class2": {Share: 1, LimitIOPS: &limit, ReservedIOPS: &reservation},
+	}, 100)
+	require.NoError(t, err)
+	q.clock = cl
+
+	var releases []ReleaseFunc
+	var requests []*request
+	tag := "class1"
+	for i := 0; i < reqCount/2; i++ {
+		req, release, err := q.pushRequest(tag)
+		require.NoError(t, err)
+		requests = append(requests, req)
+		releases = append(releases, release)
+	}
+	tag = "class2"
+	for i := 0; i < reqCount/2; i++ {
+		req, release, err := q.pushRequest(tag)
+		require.NoError(t, err)
+		requests = append(requests, req)
+		releases = append(releases, release)
+	}
+
+	q.scheduleRequest(false)
+
+	for _, req := range requests {
+		select {
+		case <-req.scheduled:
+			require.Fail(t, "no request must be scheduled because time is 0.0 while limit values are greater than 0.0")
+		default:
+		}
+	}
+
+	cl.v = 1.00001 // 1s elapsed
+	q.scheduleRequest(false)
+
+	var result []string
+	for i, req := range requests {
+		select {
+		case <-req.scheduled:
+			result = append(result, requests[i].tag)
+			releases[i]()
+		default:
+		}
+	}
+
+	require.Equal(t, 100, len(result))
+	for _, res := range result {
+		require.Equal(t, "class2", res)
+	}
+
+	cl.v = math.MaxFloat64
+	q.scheduleRequest(false)
+
+	require.Equal(t, 0, q.readyQueue.Len())
+	require.Equal(t, 0, q.sharesQueue.Len())
+	require.Equal(t, 0, q.limitQueue.Len())
+	require.Equal(t, 0, q.reservationQueue.Len())
+}
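+
+// Worked numbers for TestMClockReservationScheduling above (derived from the
+// constants in the test rather than stated explicitly): class2 reserves
+// 100 IOPS, so once the fake clock advances to t ≈ 1s the reservation queue
+// admits exactly 100 req/s × 1 s = 100 class2 requests, while the shared
+// LimitIOPS of 0.01 (one request per 100 seconds) keeps all class1 requests
+// and the remaining class2 requests blocked.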
+
+func TestMClockIdleTag(t *testing.T) {
+	t.Parallel()
+	reqCount := 100
+	idleTimeout := 2.0
+	cl := &noopClock{}
+	q, err := NewMClock(1, math.MaxUint64, map[string]TagInfo{
+		"class1": {Share: 1},
+		"class2": {Share: 1},
+	}, idleTimeout)
+	require.NoError(t, err)
+	q.clock = cl
+
+	var requests []*request
+	tag := "class1"
+	for i := 0; i < reqCount/2; i++ {
+		cl.v += idleTimeout / 2
+		req, _, err := q.pushRequest(tag)
+		require.NoError(t, err)
+		requests = append(requests, req)
+	}
+
+	// class1 requests have shares [1.0; 2.0; 3.0; ... ]
+
+	cl.v += 2 * idleTimeout
+
+	tag = "class2"
+	req, _, err := q.pushRequest(tag)
+	require.NoError(t, err)
+	requests = append(requests, req)
+
+	// class2 must be detected as idle, so the shares of all tags must be adjusted.
+
+	for _, req := range requests {
+		select {
+		case <-req.scheduled:
+		default:
+			require.True(t, req.shares >= cl.v)
+		}
+	}
+}
+
+func TestMClockClose(t *testing.T) {
+	t.Parallel()
+	q, err := NewMClock(1, math.MaxUint64, map[string]TagInfo{
+		"class1": {Share: 1},
+	}, 1000)
+	require.NoError(t, err)
+	q.clock = &noopClock{}
+
+	requestRunning := make(chan struct{})
+	checkDone := make(chan struct{})
+	eg, ctx := errgroup.WithContext(context.Background())
+	tag := "class1"
+	eg.Go(func() error {
+		release, err := q.RequestArrival(ctx, tag)
+		if err != nil {
+			return err
+		}
+		defer release()
+		close(requestRunning)
+		<-checkDone
+		return nil
+	})
+	<-requestRunning
+
+	eg.Go(func() error {
+		release, err := q.RequestArrival(ctx, tag)
+		require.Nil(t, release)
+		require.ErrorIs(t, err, ErrMClockSchedulerClosed)
+		return nil
+	})
+
+	// wait until the second request is blocked waiting
+	for q.waitingCount() == 0 {
+		time.Sleep(1 * time.Second)
+	}
+
+	q.Close()
+
+	release, err := q.RequestArrival(context.Background(), tag)
+	require.Nil(t, release)
+	require.ErrorIs(t, err, ErrMClockSchedulerClosed)
+
+	close(checkDone)
+
+	require.NoError(t, eg.Wait())
+}
+
+func TestMClockWaitLimit(t *testing.T) {
+	t.Parallel()
+	q, err := NewMClock(1, 1, map[string]TagInfo{
+		"class1": {Share: 1},
+	}, 1000)
+	require.NoError(t, err)
+	q.clock = &noopClock{}
+	defer q.Close()
+
+	requestRunning := make(chan struct{})
+	checkDone := make(chan struct{})
+	eg, ctx := errgroup.WithContext(context.Background())
+	tag := "class1"
+	// running request
+	eg.Go(func() error {
+		release, err := q.RequestArrival(ctx, tag)
+		if err != nil {
+			return err
+		}
+		defer release()
+		close(requestRunning)
+		<-checkDone
+		return nil
+	})
+
+	// waiting request
+	eg.Go(func() error {
+		<-requestRunning
+		release, err := q.RequestArrival(ctx, tag)
+		require.NotNil(t, release)
+		require.NoError(t, err)
+		defer release()
+		<-checkDone
+		return nil
+	})
+
+	// wait until the second request is waiting
+	for q.waitingCount() == 0 {
+		time.Sleep(1 * time.Second)
+	}
+
+	release, err := q.RequestArrival(ctx, tag)
+	require.Nil(t, release)
+	require.ErrorIs(t, err, ErrMClockSchedulerRequestLimitExceeded)
+
+	close(checkDone)
+	require.NoError(t, eg.Wait())
+}
+
+func TestMClockParameterValidation(t *testing.T) {
+	_, err := NewMClock(0, 1, map[string]TagInfo{
+		"class1": {Share: 1},
+	}, 1000)
+	require.ErrorIs(t, err, ErrInvalidRunLimit)
+	_, err = NewMClock(1, 0, map[string]TagInfo{
+		"class1": {Share: 1},
+	}, 1000)
+	require.NoError(t, err)
+	_, err = NewMClock(1, 1, map[string]TagInfo{
+		"class1": {Share: 1},
+	}, -1.0)
+	require.NoError(t, err)
+	_, err = NewMClock(1, 1, map[string]TagInfo{
+		"class1": {Share: 1},
+	}, float64(0))
+	require.NoError(t, err)
+	negativeValue := -1.0
+	zeroValue := float64(0)
+	_, err = NewMClock(1, 1, map[string]TagInfo{
+		"class1": {Share: negativeValue},
+	}, 1000)
+	require.ErrorIs(t, 
err, ErrInvalidTagInfo) + _, err = NewMClock(1, 1, map[string]TagInfo{ + "class1": {Share: zeroValue}, + }, 1000) + require.ErrorIs(t, err, ErrInvalidTagInfo) + _, err = NewMClock(1, 1, map[string]TagInfo{ + "class1": {Share: 1.0, ReservedIOPS: &zeroValue}, + }, 1000) + require.ErrorIs(t, err, ErrInvalidTagInfo) + _, err = NewMClock(1, 1, map[string]TagInfo{ + "class1": {Share: 1.0, ReservedIOPS: &negativeValue}, + }, 1000) + require.ErrorIs(t, err, ErrInvalidTagInfo) + _, err = NewMClock(1, 1, map[string]TagInfo{ + "class1": {Share: 1.0, LimitIOPS: &zeroValue}, + }, 1000) + require.ErrorIs(t, err, ErrInvalidTagInfo) + _, err = NewMClock(1, 1, map[string]TagInfo{ + "class1": {Share: 1.0, LimitIOPS: &negativeValue}, + }, 1000) + require.ErrorIs(t, err, ErrInvalidTagInfo) +} + +func (q *MClock) waitingCount() int { + q.mtx.Lock() + defer q.mtx.Unlock() + + return q.sharesQueue.Len() +} From 7833c3aac575254e562db71a32c8563172460ffa Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 20 Jan 2025 17:14:39 +0300 Subject: [PATCH 04/29] [#1] Makefile: Create directory with parents Signed-off-by: Dmitrii Stepanov --- Makefile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index c8f2da3..731131a 100755 --- a/Makefile +++ b/Makefile @@ -56,7 +56,7 @@ pre-commit-run: # Install linters lint-install: @rm -rf $(LINT_DIR) - @mkdir $(LINT_DIR) + @mkdir -p $(LINT_DIR) @CGO_ENABLED=1 GOBIN=$(LINT_VERSION_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION) # Run linters @@ -69,7 +69,7 @@ lint: # Install staticcheck staticcheck-install: @rm -rf $(STATICCHECK_DIR) - @mkdir $(STATICCHECK_DIR) + @mkdir -p $(STATICCHECK_DIR) @GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION) # Run staticcheck @@ -82,7 +82,7 @@ staticcheck-run: # Install gopls gopls-install: @rm -rf $(GOPLS_DIR) - @mkdir $(GOPLS_DIR) + @mkdir -p $(GOPLS_DIR) @GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION) # Run gopls @@ -100,7 +100,7 @@ gopls-run: # Install gofumpt fumpt-install: @rm -rf $(GOFUMPT_DIR) - @mkdir $(GOFUMPT_DIR) + @mkdir -p $(GOFUMPT_DIR) @GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION) # Run gofumpt From f51c095236bd197011d049d36e4f6f40778fa83a Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 20 Jan 2025 17:26:14 +0300 Subject: [PATCH 05/29] [#1] pre-commit: Use local golangci-lint hook Signed-off-by: Dmitrii Stepanov --- .pre-commit-config.yaml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 946e000..6d0e1e4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -23,11 +23,6 @@ repos: hooks: - id: shellcheck - - repo: https://github.com/golangci/golangci-lint - rev: v1.59.1 - hooks: - - id: golangci-lint - - repo: local hooks: - id: go-unit-tests @@ -36,3 +31,9 @@ repos: pass_filenames: false types: [go] language: system + - id: golangci-lint + name: golangci-lint check + entry: make lint + pass_filenames: false + types: [go] + language: system From e18d1a7c4550cf4355ab651976a38406b069e559 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 24 Jan 2025 12:07:14 +0300 Subject: [PATCH 06/29] [#1] mclock: Fix bench format Signed-off-by: Dmitrii Stepanov --- scheduling/mclock_bench.result | 330 ++++++++++++++++---------------- scheduling/mclock_bench_test.go | 4 +- 2 files changed, 167 insertions(+), 167 deletions(-) diff 
--git a/scheduling/mclock_bench.result b/scheduling/mclock_bench.result index fa43dc4..c12f59d 100644 --- a/scheduling/mclock_bench.result +++ b/scheduling/mclock_bench.result @@ -4,169 +4,169 @@ goos: linux goarch: amd64 pkg: git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz -BenchmarkMClock/noop,_1_parallelism-8 8660 140071 ns/op 0 B/op 0 allocs/op -BenchmarkMClock/mclock,_no_limit,_no_reservation,_1_parallelism,_1_tags-8 8433 144946 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_no_reservation,_1_parallelism,_2_tags-8 8529 144497 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_no_reservation,_1_parallelism,_4_tags-8 7638 144267 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_no_reservation,_1_parallelism,_8_tags-8 8392 144710 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_no_reservation,_1_parallelism,_16_tags-8 8474 143977 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_1_parallelism,_1_tags-8 8374 143286 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_1_parallelism,_2_tags-8 7911 144500 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_1_parallelism,_4_tags-8 7332 144296 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_1_parallelism,_8_tags-8 7830 144359 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_1_parallelism,_16_tags-8 7839 145112 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_1_parallelism,_1_tags-8 7750 143561 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_1_parallelism,_2_tags-8 7840 143975 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_1_parallelism,_4_tags-8 7886 143822 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_1_parallelism,_8_tags-8 8251 144555 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_1_parallelism,_16_tags-8 7665 144781 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_1_parallelism,_1_tags-8 7881 145169 ns/op 371 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_1_parallelism,_2_tags-8 8386 143578 ns/op 369 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_1_parallelism,_4_tags-8 8274 143942 ns/op 368 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_1_parallelism,_8_tags-8 7830 143690 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_1_parallelism,_16_tags-8 7718 142707 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_1_parallelism,_1_tags-8 6446 180746 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_1_parallelism,_2_tags-8 6412 165058 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_1_parallelism,_4_tags-8 7323 156572 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_1_parallelism,_8_tags-8 8360 151004 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_1_parallelism,_16_tags-8 7712 147576 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_1_parallelism,_1_tags-8 6020 178971 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_1_parallelism,_2_tags-8 6448 165123 
ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_1_parallelism,_4_tags-8 6806 164651 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_1_parallelism,_8_tags-8 7284 152613 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_1_parallelism,_16_tags-8 7825 147727 ns/op 374 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_1_parallelism,_1_tags-8 5780 188006 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_1_parallelism,_2_tags-8 6483 172047 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_1_parallelism,_4_tags-8 7290 158680 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_1_parallelism,_8_tags-8 6862 148069 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_1_parallelism,_16_tags-8 7749 147112 ns/op 374 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_1_parallelism,_1_tags-8 5766 175459 ns/op 368 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_1_parallelism,_2_tags-8 7200 161870 ns/op 368 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_1_parallelism,_4_tags-8 7300 152912 ns/op 367 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_1_parallelism,_8_tags-8 7208 148916 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_1_parallelism,_16_tags-8 7648 146154 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/noop,_8_parallelism-8 8521 140329 ns/op 0 B/op 0 allocs/op -BenchmarkMClock/mclock,_no_limit,_no_reservation,_8_parallelism,_1_tags-8 7728 143902 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_no_reservation,_8_parallelism,_2_tags-8 8414 144178 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_no_reservation,_8_parallelism,_4_tags-8 8403 145010 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_no_reservation,_8_parallelism,_8_tags-8 8346 143279 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_no_reservation,_8_parallelism,_16_tags-8 7945 141189 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_8_parallelism,_1_tags-8 7820 141144 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_8_parallelism,_2_tags-8 8460 143132 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_8_parallelism,_4_tags-8 8343 144865 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_8_parallelism,_8_tags-8 8383 143854 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_8_parallelism,_16_tags-8 8379 144622 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_8_parallelism,_1_tags-8 7818 144074 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_8_parallelism,_2_tags-8 8511 145416 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_8_parallelism,_4_tags-8 8350 144417 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_8_parallelism,_8_tags-8 8364 144918 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_8_parallelism,_16_tags-8 7776 143588 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_8_parallelism,_1_tags-8 8356 144611 ns/op 371 B/op 8 
allocs/op -BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_8_parallelism,_2_tags-8 7828 142666 ns/op 370 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_8_parallelism,_4_tags-8 7870 142888 ns/op 368 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_8_parallelism,_8_tags-8 8528 140395 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_8_parallelism,_16_tags-8 8342 142833 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_8_parallelism,_1_tags-8 5640 187720 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_8_parallelism,_2_tags-8 6830 177689 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_8_parallelism,_4_tags-8 7209 156308 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_8_parallelism,_8_tags-8 7832 152150 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_8_parallelism,_16_tags-8 7520 145315 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_8_parallelism,_1_tags-8 5520 198036 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_8_parallelism,_2_tags-8 6430 171407 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_8_parallelism,_4_tags-8 7269 159044 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_8_parallelism,_8_tags-8 7760 147757 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_8_parallelism,_16_tags-8 7794 145792 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_8_parallelism,_1_tags-8 5510 199098 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_8_parallelism,_2_tags-8 7602 177956 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_8_parallelism,_4_tags-8 6955 160300 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_8_parallelism,_8_tags-8 7950 146992 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_8_parallelism,_16_tags-8 7870 145343 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_8_parallelism,_1_tags-8 6033 189148 ns/op 370 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_8_parallelism,_2_tags-8 6764 172016 ns/op 368 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_8_parallelism,_4_tags-8 7255 156037 ns/op 367 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_8_parallelism,_8_tags-8 7879 150515 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_8_parallelism,_16_tags-8 7802 147904 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/noop,_32_parallelism-8 7870 139959 ns/op 0 B/op 0 allocs/op -BenchmarkMClock/mclock,_no_limit,_no_reservation,_32_parallelism,_1_tags-8 8146 141951 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_no_reservation,_32_parallelism,_2_tags-8 7737 143994 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_no_reservation,_32_parallelism,_4_tags-8 8444 143977 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_no_reservation,_32_parallelism,_8_tags-8 8367 142965 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_no_reservation,_32_parallelism,_16_tags-8 7807 
142984 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_32_parallelism,_1_tags-8 8326 142276 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_32_parallelism,_2_tags-8 8164 142354 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_32_parallelism,_4_tags-8 8367 143149 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_32_parallelism,_8_tags-8 7864 143681 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_32_parallelism,_16_tags-8 7666 143557 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_32_parallelism,_1_tags-8 8354 142904 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_32_parallelism,_2_tags-8 8210 143932 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_32_parallelism,_4_tags-8 8328 143229 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_32_parallelism,_8_tags-8 8224 142964 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_32_parallelism,_16_tags-8 8398 142558 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_32_parallelism,_1_tags-8 7723 142118 ns/op 371 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_32_parallelism,_2_tags-8 8337 143492 ns/op 369 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_32_parallelism,_4_tags-8 7651 144301 ns/op 368 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_32_parallelism,_8_tags-8 8320 143327 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_32_parallelism,_16_tags-8 8214 143211 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_32_parallelism,_1_tags-8 6573 172171 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_32_parallelism,_2_tags-8 7261 158054 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_32_parallelism,_4_tags-8 7264 151381 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_32_parallelism,_8_tags-8 7887 149740 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_32_parallelism,_16_tags-8 7783 145891 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_32_parallelism,_1_tags-8 6448 184402 ns/op 374 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_32_parallelism,_2_tags-8 7142 170111 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_32_parallelism,_4_tags-8 6856 157931 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_32_parallelism,_8_tags-8 7332 147039 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_32_parallelism,_16_tags-8 8328 145941 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_32_parallelism,_1_tags-8 6741 170048 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_32_parallelism,_2_tags-8 6813 162057 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_32_parallelism,_4_tags-8 7334 152023 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_32_parallelism,_8_tags-8 7390 151674 ns/op 372 B/op 9 allocs/op 
-BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_32_parallelism,_16_tags-8 7299 144482 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_32_parallelism,_1_tags-8 6078 180087 ns/op 368 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_32_parallelism,_2_tags-8 6906 171037 ns/op 368 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_32_parallelism,_4_tags-8 7348 161815 ns/op 367 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_32_parallelism,_8_tags-8 7352 150144 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_32_parallelism,_16_tags-8 8432 148060 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/noop,_64_parallelism-8 7905 139440 ns/op 0 B/op 0 allocs/op -BenchmarkMClock/mclock,_no_limit,_no_reservation,_64_parallelism,_1_tags-8 7698 143624 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_no_reservation,_64_parallelism,_2_tags-8 7994 142888 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_no_reservation,_64_parallelism,_4_tags-8 8451 142612 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_no_reservation,_64_parallelism,_8_tags-8 8332 141805 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_no_reservation,_64_parallelism,_16_tags-8 7700 144190 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_64_parallelism,_1_tags-8 8425 143468 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_64_parallelism,_2_tags-8 8294 143356 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_64_parallelism,_4_tags-8 7993 143701 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_64_parallelism,_8_tags-8 8104 142619 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_64_parallelism,_16_tags-8 7333 143398 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_64_parallelism,_1_tags-8 8396 143165 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_64_parallelism,_2_tags-8 8485 143813 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_64_parallelism,_4_tags-8 8193 144148 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_64_parallelism,_8_tags-8 7712 143123 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_64_parallelism,_16_tags-8 7663 144352 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_64_parallelism,_1_tags-8 7795 143937 ns/op 371 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_64_parallelism,_2_tags-8 8484 144034 ns/op 369 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_64_parallelism,_4_tags-8 7846 142858 ns/op 368 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_64_parallelism,_8_tags-8 8320 143052 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_64_parallelism,_16_tags-8 8484 142492 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_64_parallelism,_1_tags-8 5718 178028 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_64_parallelism,_2_tags-8 6993 160263 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_64_parallelism,_4_tags-8 6818 152746 ns/op 364 B/op 8 allocs/op 
-BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_64_parallelism,_8_tags-8 7684 149975 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_64_parallelism,_16_tags-8 7791 145647 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_64_parallelism,_1_tags-8 6402 190525 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_64_parallelism,_2_tags-8 6108 175412 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_64_parallelism,_4_tags-8 7340 159547 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_64_parallelism,_8_tags-8 7800 153072 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_64_parallelism,_16_tags-8 7863 146726 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_64_parallelism,_1_tags-8 5761 175532 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_64_parallelism,_2_tags-8 6433 165923 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_64_parallelism,_4_tags-8 7309 153218 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_64_parallelism,_8_tags-8 7173 148557 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_64_parallelism,_16_tags-8 8353 146923 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_64_parallelism,_1_tags-8 6364 172028 ns/op 369 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_64_parallelism,_2_tags-8 7300 161579 ns/op 368 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_64_parallelism,_4_tags-8 6910 153875 ns/op 368 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_64_parallelism,_8_tags-8 7945 147313 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_64_parallelism,_16_tags-8 7848 146027 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=noop/parallelism=1-8 8612 138915 ns/op 0 B/op 0 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=1/tags=1-8 8461 140662 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=1/tags=2-8 8409 140666 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=1/tags=4-8 8462 139999 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=1/tags=8-8 7742 141602 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=1/tags=16-8 8434 144830 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=1/tags=1-8 8517 140830 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=1/tags=2-8 8565 141197 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=1/tags=4-8 8412 140269 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=1/tags=8-8 8402 140532 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=1/tags=16-8 8371 140780 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=1/tags=1-8 8522 139374 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=1/tags=2-8 8468 141222 
ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=1/tags=4-8 8508 139646 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=1/tags=8-8 7807 141725 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=1/tags=16-8 8422 141968 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=1/tags=1-8 8524 142616 ns/op 371 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=1/tags=2-8 8541 141818 ns/op 370 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=1/tags=4-8 8499 141254 ns/op 368 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=1/tags=8-8 7896 141172 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=1/tags=16-8 8421 141787 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=1/tags=1-8 5787 206520 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=1/tags=2-8 6122 183203 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=1/tags=4-8 6860 165364 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=1/tags=8-8 7252 152722 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=1/tags=16-8 8497 147538 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=1/tags=1-8 5797 207468 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=1/tags=2-8 6188 183499 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=1/tags=4-8 6867 162982 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=1/tags=8-8 7897 151914 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=1/tags=16-8 7846 147202 ns/op 374 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=1/tags=1-8 5733 203388 ns/op 374 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=1/tags=2-8 6469 181024 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=1/tags=4-8 7272 164503 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=1/tags=8-8 7747 152947 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=1/tags=16-8 7788 148419 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=1/tags=1-8 5216 202090 ns/op 369 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=1/tags=2-8 6385 179155 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=1/tags=4-8 7285 163441 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=1/tags=8-8 7756 153210 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=1/tags=16-8 7699 148362 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=noop/parallelism=8-8 8486 140380 ns/op 0 B/op 0 allocs/op 
+BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=8/tags=1-8 8468 141433 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=8/tags=2-8 8352 141888 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=8/tags=4-8 8344 142430 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=8/tags=8-8 8514 142403 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=8/tags=16-8 8421 140276 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=8/tags=1-8 8403 141441 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=8/tags=2-8 8222 144061 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=8/tags=4-8 8494 141430 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=8/tags=8-8 7882 144836 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=8/tags=16-8 8452 142626 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=8/tags=1-8 8508 142037 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=8/tags=2-8 7935 140502 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=8/tags=4-8 8530 141432 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=8/tags=8-8 8394 141469 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=8/tags=16-8 8461 142834 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=8/tags=1-8 7852 141090 ns/op 371 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=8/tags=2-8 8432 140560 ns/op 370 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=8/tags=4-8 8408 141365 ns/op 368 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=8/tags=8-8 8440 140667 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=8/tags=16-8 8390 140262 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=8/tags=1-8 5253 208697 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=8/tags=2-8 6115 185026 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=8/tags=4-8 6867 165016 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=8/tags=8-8 7353 154066 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=8/tags=16-8 8442 148492 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=8/tags=1-8 5259 206154 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=8/tags=2-8 6078 182541 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=8/tags=4-8 7320 163852 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=8/tags=8-8 7291 153780 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=8/tags=16-8 7858 149710 ns/op 373 B/op 
9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=8/tags=1-8 5488 205942 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=8/tags=2-8 6138 183340 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=8/tags=4-8 7318 165545 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=8/tags=8-8 7234 153066 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=8/tags=16-8 7866 147706 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=8/tags=1-8 5530 206905 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=8/tags=2-8 5811 181518 ns/op 368 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=8/tags=4-8 7308 164970 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=8/tags=8-8 7383 153095 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=8/tags=16-8 7888 148116 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=noop/parallelism=32-8 8564 139403 ns/op 0 B/op 0 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=32/tags=1-8 8474 141378 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=32/tags=2-8 8475 140987 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=32/tags=4-8 8413 141971 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=32/tags=8-8 8408 140339 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=32/tags=16-8 8406 140755 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=32/tags=1-8 8564 139868 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=32/tags=2-8 8384 140484 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=32/tags=4-8 8415 140980 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=32/tags=8-8 8490 141347 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=32/tags=16-8 7891 139910 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=32/tags=1-8 8455 139776 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=32/tags=2-8 8516 139899 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=32/tags=4-8 8470 142687 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=32/tags=8-8 7915 139871 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=32/tags=16-8 8492 140567 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=32/tags=1-8 8500 140866 ns/op 371 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=32/tags=2-8 8456 140401 ns/op 370 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=32/tags=4-8 8456 141583 ns/op 368 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=32/tags=8-8 
8484 141490 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=32/tags=16-8 8419 138953 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=32/tags=1-8 5017 208595 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=32/tags=2-8 5794 182831 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=32/tags=4-8 7376 165324 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=32/tags=8-8 7717 153329 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=32/tags=16-8 7904 146121 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=32/tags=1-8 5757 202600 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=32/tags=2-8 6402 177298 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=32/tags=4-8 7309 162638 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=32/tags=8-8 7866 151833 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=32/tags=16-8 7820 146303 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=32/tags=1-8 5188 209597 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=32/tags=2-8 6481 182621 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=32/tags=4-8 7294 164115 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=32/tags=8-8 7377 154026 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=32/tags=16-8 7881 147141 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=32/tags=1-8 5802 208740 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=32/tags=2-8 5842 181415 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=32/tags=4-8 6537 166779 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=32/tags=8-8 7916 154014 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=32/tags=16-8 7298 146826 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=noop/parallelism=64-8 8612 139050 ns/op 0 B/op 0 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=64/tags=1-8 8612 142611 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=64/tags=2-8 8496 141572 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=64/tags=4-8 7894 141572 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=64/tags=8-8 8539 140833 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=64/tags=16-8 8424 140045 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=64/tags=1-8 8478 141058 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=64/tags=2-8 8464 140962 ns/op 372 B/op 9 allocs/op 
+BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=64/tags=4-8 8516 142536 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=64/tags=8-8 8362 141785 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=64/tags=16-8 8432 139365 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=64/tags=1-8 8492 140547 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=64/tags=2-8 8419 139413 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=64/tags=4-8 8463 140481 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=64/tags=8-8 8496 141455 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=64/tags=16-8 8408 139777 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=64/tags=1-8 8467 141276 ns/op 371 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=64/tags=2-8 8481 140046 ns/op 370 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=64/tags=4-8 7881 141473 ns/op 368 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=64/tags=8-8 8514 141032 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=64/tags=16-8 7956 140117 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=64/tags=1-8 5254 209386 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=64/tags=2-8 6103 183966 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=64/tags=4-8 7315 164810 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=64/tags=8-8 7387 153669 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=64/tags=16-8 7950 148866 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=64/tags=1-8 6088 206754 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=64/tags=2-8 6068 183744 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=64/tags=4-8 6855 164447 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=64/tags=8-8 7850 151862 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=64/tags=16-8 8410 147936 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=64/tags=1-8 5718 208111 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=64/tags=2-8 6127 181511 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=64/tags=4-8 7296 164592 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=64/tags=8-8 7820 152057 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=64/tags=16-8 7815 147004 ns/op 374 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=64/tags=1-8 5493 208340 ns/op 369 B/op 8 allocs/op 
+BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=64/tags=2-8 6524 181903 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=64/tags=4-8 7359 162915 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=64/tags=8-8 7855 151878 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=64/tags=16-8 7861 146234 ns/op 367 B/op 8 allocs/op PASS -ok git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling 192.364s +ok git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling 195.781s diff --git a/scheduling/mclock_bench_test.go b/scheduling/mclock_bench_test.go index 9888a40..f18f421 100644 --- a/scheduling/mclock_bench_test.go +++ b/scheduling/mclock_bench_test.go @@ -36,7 +36,7 @@ func BenchmarkMClock(b *testing.B) { b.SetParallelism(parallelism) noopMClock := &noopMClockScheduler{} - b.Run(fmt.Sprintf("noop, %d parallelism", parallelism), func(b *testing.B) { + b.Run(fmt.Sprintf("impl=noop/parallelism=%d", parallelism), func(b *testing.B) { b.ResetTimer() b.ReportAllocs() b.RunParallel(func(pb *testing.PB) { @@ -66,7 +66,7 @@ func BenchmarkMClock(b *testing.B) { if limit != nil { limitStr = strconv.FormatFloat(*limit, 'f', 1, 64) } - b.Run(fmt.Sprintf("mclock, %s limit, %s reservation, %d parallelism, %d tags", limitStr, resStr, parallelism, tags), func(b *testing.B) { + b.Run(fmt.Sprintf("impl=mclock/limit=%s/reservation=%s/parallelism=%d/tags=%d", limitStr, resStr, parallelism, tags), func(b *testing.B) { b.ResetTimer() b.ReportAllocs() b.RunParallel(func(pb *testing.PB) { From 54b4bf7cc108a5b2abdf7e5d18d6ce6d217f54ed Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 24 Jan 2025 12:18:16 +0300 Subject: [PATCH 07/29] [#1] mclock: Fix time based scheduling Signed-off-by: Dmitrii Stepanov --- scheduling/mclock.go | 35 ++-- scheduling/mclock_bench.result | 330 ++++++++++++++++----------------- scheduling/mclock_test.go | 40 +++- 3 files changed, 223 insertions(+), 182 deletions(-) diff --git a/scheduling/mclock.go b/scheduling/mclock.go index 7c06bbc..5967d2d 100644 --- a/scheduling/mclock.go +++ b/scheduling/mclock.go @@ -74,15 +74,15 @@ type MClock struct { idleTimeout float64 tagInfo map[string]TagInfo - mtx sync.Mutex - previous map[string]*request - inProgress uint64 - lastSchedule float64 - reservationQueue *queue - limitQueue *queue - sharesQueue *queue - readyQueue *queue - closed bool + mtx sync.Mutex + previous map[string]*request + inProgress uint64 + timeBasedScheduleTs float64 + reservationQueue *queue + limitQueue *queue + sharesQueue *queue + readyQueue *queue + closed bool } // NewMClock creates new MClock scheduler instance with @@ -105,10 +105,11 @@ func NewMClock(runLimit, waitLimit uint64, tagInfo map[string]TagInfo, idleTimeo idleTimeout: idleTimeout, tagInfo: tagInfo, - reservationQueue: &queue{}, - limitQueue: &queue{}, - sharesQueue: &queue{}, - readyQueue: &queue{}, + reservationQueue: &queue{}, + limitQueue: &queue{}, + sharesQueue: &queue{}, + readyQueue: &queue{}, + timeBasedScheduleTs: math.MaxFloat64, } previous := make(map[string]*request) @@ -304,12 +305,16 @@ func (q *MClock) setNextScheduleTimer(now float64) { if q.limitQueue.Len() > 0 && q.limitQueue.items[0].ts() < nextTs { nextTs = q.limitQueue.items[0].ts() } + if nextTs <= now { + // should not happen as we always compare .ts() <= now + return + } - if q.lastSchedule < now && q.lastSchedule > nextTs { + if q.timeBasedScheduleTs > nextTs 
{ q.clock.runAt(nextTs, func() { q.scheduleRequest(false) }) - q.lastSchedule = nextTs + q.timeBasedScheduleTs = nextTs } } diff --git a/scheduling/mclock_bench.result b/scheduling/mclock_bench.result index c12f59d..bdd4834 100644 --- a/scheduling/mclock_bench.result +++ b/scheduling/mclock_bench.result @@ -4,169 +4,169 @@ goos: linux goarch: amd64 pkg: git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz -BenchmarkMClock/impl=noop/parallelism=1-8 8612 138915 ns/op 0 B/op 0 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=1/tags=1-8 8461 140662 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=1/tags=2-8 8409 140666 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=1/tags=4-8 8462 139999 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=1/tags=8-8 7742 141602 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=1/tags=16-8 8434 144830 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=1/tags=1-8 8517 140830 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=1/tags=2-8 8565 141197 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=1/tags=4-8 8412 140269 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=1/tags=8-8 8402 140532 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=1/tags=16-8 8371 140780 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=1/tags=1-8 8522 139374 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=1/tags=2-8 8468 141222 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=1/tags=4-8 8508 139646 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=1/tags=8-8 7807 141725 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=1/tags=16-8 8422 141968 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=1/tags=1-8 8524 142616 ns/op 371 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=1/tags=2-8 8541 141818 ns/op 370 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=1/tags=4-8 8499 141254 ns/op 368 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=1/tags=8-8 7896 141172 ns/op 367 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=1/tags=16-8 8421 141787 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=1/tags=1-8 5787 206520 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=1/tags=2-8 6122 183203 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=1/tags=4-8 6860 165364 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=1/tags=8-8 7252 152722 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=1/tags=16-8 8497 147538 ns/op 365 B/op 8 allocs/op 
-BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=1/tags=1-8 5797 207468 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=1/tags=2-8 6188 183499 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=1/tags=4-8 6867 162982 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=1/tags=8-8 7897 151914 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=1/tags=16-8 7846 147202 ns/op 374 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=1/tags=1-8 5733 203388 ns/op 374 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=1/tags=2-8 6469 181024 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=1/tags=4-8 7272 164503 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=1/tags=8-8 7747 152947 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=1/tags=16-8 7788 148419 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=1/tags=1-8 5216 202090 ns/op 369 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=1/tags=2-8 6385 179155 ns/op 367 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=1/tags=4-8 7285 163441 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=1/tags=8-8 7756 153210 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=1/tags=16-8 7699 148362 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/impl=noop/parallelism=8-8 8486 140380 ns/op 0 B/op 0 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=8/tags=1-8 8468 141433 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=8/tags=2-8 8352 141888 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=8/tags=4-8 8344 142430 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=8/tags=8-8 8514 142403 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=8/tags=16-8 8421 140276 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=8/tags=1-8 8403 141441 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=8/tags=2-8 8222 144061 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=8/tags=4-8 8494 141430 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=8/tags=8-8 7882 144836 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=8/tags=16-8 8452 142626 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=8/tags=1-8 8508 142037 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=8/tags=2-8 7935 140502 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=8/tags=4-8 8530 141432 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=8/tags=8-8 8394 141469 ns/op 372 
B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=8/tags=16-8 8461 142834 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=8/tags=1-8 7852 141090 ns/op 371 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=8/tags=2-8 8432 140560 ns/op 370 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=8/tags=4-8 8408 141365 ns/op 368 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=8/tags=8-8 8440 140667 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=8/tags=16-8 8390 140262 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=8/tags=1-8 5253 208697 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=8/tags=2-8 6115 185026 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=8/tags=4-8 6867 165016 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=8/tags=8-8 7353 154066 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=8/tags=16-8 8442 148492 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=8/tags=1-8 5259 206154 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=8/tags=2-8 6078 182541 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=8/tags=4-8 7320 163852 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=8/tags=8-8 7291 153780 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=8/tags=16-8 7858 149710 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=8/tags=1-8 5488 205942 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=8/tags=2-8 6138 183340 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=8/tags=4-8 7318 165545 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=8/tags=8-8 7234 153066 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=8/tags=16-8 7866 147706 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=8/tags=1-8 5530 206905 ns/op 367 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=8/tags=2-8 5811 181518 ns/op 368 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=8/tags=4-8 7308 164970 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=8/tags=8-8 7383 153095 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=8/tags=16-8 7888 148116 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/impl=noop/parallelism=32-8 8564 139403 ns/op 0 B/op 0 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=32/tags=1-8 8474 141378 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=32/tags=2-8 8475 140987 ns/op 364 B/op 8 allocs/op 
-BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=32/tags=4-8 8413 141971 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=32/tags=8-8 8408 140339 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=32/tags=16-8 8406 140755 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=32/tags=1-8 8564 139868 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=32/tags=2-8 8384 140484 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=32/tags=4-8 8415 140980 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=32/tags=8-8 8490 141347 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=32/tags=16-8 7891 139910 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=32/tags=1-8 8455 139776 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=32/tags=2-8 8516 139899 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=32/tags=4-8 8470 142687 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=32/tags=8-8 7915 139871 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=32/tags=16-8 8492 140567 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=32/tags=1-8 8500 140866 ns/op 371 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=32/tags=2-8 8456 140401 ns/op 370 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=32/tags=4-8 8456 141583 ns/op 368 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=32/tags=8-8 8484 141490 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=32/tags=16-8 8419 138953 ns/op 367 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=32/tags=1-8 5017 208595 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=32/tags=2-8 5794 182831 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=32/tags=4-8 7376 165324 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=32/tags=8-8 7717 153329 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=32/tags=16-8 7904 146121 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=32/tags=1-8 5757 202600 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=32/tags=2-8 6402 177298 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=32/tags=4-8 7309 162638 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=32/tags=8-8 7866 151833 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=32/tags=16-8 7820 146303 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=32/tags=1-8 5188 209597 ns/op 373 B/op 9 allocs/op 
-BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=32/tags=2-8 6481 182621 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=32/tags=4-8 7294 164115 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=32/tags=8-8 7377 154026 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=32/tags=16-8 7881 147141 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=32/tags=1-8 5802 208740 ns/op 367 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=32/tags=2-8 5842 181415 ns/op 367 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=32/tags=4-8 6537 166779 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=32/tags=8-8 7916 154014 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=32/tags=16-8 7298 146826 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/impl=noop/parallelism=64-8 8612 139050 ns/op 0 B/op 0 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=64/tags=1-8 8612 142611 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=64/tags=2-8 8496 141572 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=64/tags=4-8 7894 141572 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=64/tags=8-8 8539 140833 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=64/tags=16-8 8424 140045 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=64/tags=1-8 8478 141058 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=64/tags=2-8 8464 140962 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=64/tags=4-8 8516 142536 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=64/tags=8-8 8362 141785 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=64/tags=16-8 8432 139365 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=64/tags=1-8 8492 140547 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=64/tags=2-8 8419 139413 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=64/tags=4-8 8463 140481 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=64/tags=8-8 8496 141455 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=64/tags=16-8 8408 139777 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=64/tags=1-8 8467 141276 ns/op 371 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=64/tags=2-8 8481 140046 ns/op 370 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=64/tags=4-8 7881 141473 ns/op 368 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=64/tags=8-8 8514 141032 ns/op 367 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=64/tags=16-8 7956 
140117 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=64/tags=1-8 5254 209386 ns/op 366 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=64/tags=2-8 6103 183966 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=64/tags=4-8 7315 164810 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=64/tags=8-8 7387 153669 ns/op 364 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=64/tags=16-8 7950 148866 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=64/tags=1-8 6088 206754 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=64/tags=2-8 6068 183744 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=64/tags=4-8 6855 164447 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=64/tags=8-8 7850 151862 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=64/tags=16-8 8410 147936 ns/op 373 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=64/tags=1-8 5718 208111 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=64/tags=2-8 6127 181511 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=64/tags=4-8 7296 164592 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=64/tags=8-8 7820 152057 ns/op 372 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=64/tags=16-8 7815 147004 ns/op 374 B/op 9 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=64/tags=1-8 5493 208340 ns/op 369 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=64/tags=2-8 6524 181903 ns/op 367 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=64/tags=4-8 7359 162915 ns/op 367 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=64/tags=8-8 7855 151878 ns/op 365 B/op 8 allocs/op -BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=64/tags=16-8 7861 146234 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/impl=noop/parallelism=1-8 8623 136817 ns/op 0 B/op 0 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=1/tags=1-8 7368 140674 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=1/tags=2-8 8486 140394 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=1/tags=4-8 8500 141410 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=1/tags=8-8 8268 142724 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=1/tags=16-8 8431 142548 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=1/tags=1-8 8505 142035 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=1/tags=2-8 7845 142658 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=1/tags=4-8 8473 140029 ns/op 372 B/op 9 allocs/op 
+BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=1/tags=8-8 8518 142607 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=1/tags=16-8 8578 141002 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=1/tags=1-8 8557 141858 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=1/tags=2-8 8353 142742 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=1/tags=4-8 8475 142753 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=1/tags=8-8 8433 141319 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=1/tags=16-8 8480 141825 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=1/tags=1-8 7827 141525 ns/op 371 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=1/tags=2-8 7935 140939 ns/op 370 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=1/tags=4-8 8472 140988 ns/op 368 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=1/tags=8-8 8373 142260 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=1/tags=16-8 8383 142239 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=1/tags=1-8 5727 206852 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=1/tags=2-8 6516 178739 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=1/tags=4-8 7300 163438 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=1/tags=8-8 7807 152344 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=1/tags=16-8 8443 147051 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=1/tags=1-8 6062 205018 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=1/tags=2-8 6526 182511 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=1/tags=4-8 7341 163028 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=1/tags=8-8 7930 153741 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=1/tags=16-8 7804 148216 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=1/tags=1-8 5485 207763 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=1/tags=2-8 5774 181830 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=1/tags=4-8 7262 165102 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=1/tags=8-8 7231 152958 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=1/tags=16-8 7849 146705 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=1/tags=1-8 5275 206549 ns/op 368 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=1/tags=2-8 6115 180053 ns/op 367 B/op 8 allocs/op 
+BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=1/tags=4-8 7264 163943 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=1/tags=8-8 7810 152008 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=1/tags=16-8 7875 147107 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=noop/parallelism=8-8 8589 139356 ns/op 0 B/op 0 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=8/tags=1-8 7916 142917 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=8/tags=2-8 8392 141914 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=8/tags=4-8 8444 141011 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=8/tags=8-8 8419 140638 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=8/tags=16-8 8473 141018 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=8/tags=1-8 8487 139941 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=8/tags=2-8 7938 142745 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=8/tags=4-8 8522 140837 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=8/tags=8-8 8431 141361 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=8/tags=16-8 8390 142171 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=8/tags=1-8 8449 140695 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=8/tags=2-8 8467 140622 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=8/tags=4-8 8460 140925 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=8/tags=8-8 8487 141316 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=8/tags=16-8 7876 141374 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=8/tags=1-8 7887 140590 ns/op 371 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=8/tags=2-8 8328 142214 ns/op 370 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=8/tags=4-8 8475 141472 ns/op 368 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=8/tags=8-8 8402 141861 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=8/tags=16-8 8509 142173 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=8/tags=1-8 5490 207911 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=8/tags=2-8 6481 182955 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=8/tags=4-8 6816 165103 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=8/tags=8-8 6901 155528 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=8/tags=16-8 7690 148762 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=8/tags=1-8 5437 205208 ns/op 372 B/op 9 allocs/op 
+BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=8/tags=2-8 6092 183311 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=8/tags=4-8 6907 162595 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=8/tags=8-8 7756 151761 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=8/tags=16-8 7855 146382 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=8/tags=1-8 5468 206883 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=8/tags=2-8 6061 180350 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=8/tags=4-8 6795 163866 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=8/tags=8-8 7350 152345 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=8/tags=16-8 7869 145708 ns/op 374 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=8/tags=1-8 5283 207099 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=8/tags=2-8 6799 180029 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=8/tags=4-8 7324 164306 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=8/tags=8-8 7770 152377 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=8/tags=16-8 8342 146888 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/impl=noop/parallelism=32-8 8604 140481 ns/op 0 B/op 0 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=32/tags=1-8 8491 142215 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=32/tags=2-8 8508 140537 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=32/tags=4-8 8320 142631 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=32/tags=8-8 8368 142430 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=32/tags=16-8 8432 141733 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=32/tags=1-8 7855 141754 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=32/tags=2-8 7858 141304 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=32/tags=4-8 8545 140996 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=32/tags=8-8 8437 142022 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=32/tags=16-8 8418 142653 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=32/tags=1-8 8448 141117 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=32/tags=2-8 8530 142164 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=32/tags=4-8 7944 142449 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=32/tags=8-8 8551 139223 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=32/tags=16-8 8491 
140160 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=32/tags=1-8 8354 141835 ns/op 371 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=32/tags=2-8 7880 141608 ns/op 370 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=32/tags=4-8 7940 140794 ns/op 368 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=32/tags=8-8 8414 140646 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=32/tags=16-8 8373 140890 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=32/tags=1-8 5256 209447 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=32/tags=2-8 6451 183969 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=32/tags=4-8 7326 163980 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=32/tags=8-8 7862 152768 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=32/tags=16-8 8390 147437 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=32/tags=1-8 5228 206086 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=32/tags=2-8 6471 181844 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=32/tags=4-8 7318 163604 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=32/tags=8-8 7827 151880 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=32/tags=16-8 8362 146623 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=32/tags=1-8 5541 210639 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=32/tags=2-8 5818 183541 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=32/tags=4-8 6910 163609 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=32/tags=8-8 7797 152752 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=32/tags=16-8 7344 146966 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=32/tags=1-8 5746 206651 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=32/tags=2-8 6490 182702 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=32/tags=4-8 7250 164727 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=32/tags=8-8 7386 152508 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=32/tags=16-8 8379 146547 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=noop/parallelism=64-8 8486 138281 ns/op 0 B/op 0 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=64/tags=1-8 8472 142782 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=64/tags=2-8 8437 140925 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=64/tags=4-8 8338 141035 ns/op 364 B/op 8 
allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=64/tags=8-8 8487 142288 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=no/parallelism=64/tags=16-8 8366 142353 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=64/tags=1-8 8510 140838 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=64/tags=2-8 7935 142844 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=64/tags=4-8 8218 139362 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=64/tags=8-8 7977 140291 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=1.0/parallelism=64/tags=16-8 8371 140322 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=64/tags=1-8 8524 140484 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=64/tags=2-8 8461 142431 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=64/tags=4-8 8420 141652 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=64/tags=8-8 8385 140956 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=100.0/parallelism=64/tags=16-8 8355 142509 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=64/tags=1-8 7239 141018 ns/op 371 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=64/tags=2-8 8467 141807 ns/op 370 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=64/tags=4-8 8420 140763 ns/op 368 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=64/tags=8-8 8474 140264 ns/op 366 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=no/reservation=10000.0/parallelism=64/tags=16-8 8413 142191 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=64/tags=1-8 5474 208031 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=64/tags=2-8 5706 182794 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=64/tags=4-8 7248 165044 ns/op 364 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=64/tags=8-8 7825 153229 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=no/parallelism=64/tags=16-8 7879 148568 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=64/tags=1-8 5278 211267 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=64/tags=2-8 6108 183247 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=64/tags=4-8 7338 163152 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=64/tags=8-8 7339 154054 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=1.0/parallelism=64/tags=16-8 7750 146000 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=64/tags=1-8 5716 208259 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=64/tags=2-8 6450 185159 ns/op 373 B/op 9 allocs/op 
+BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=64/tags=4-8 7285 168077 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=64/tags=8-8 7357 151950 ns/op 372 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=100.0/parallelism=64/tags=16-8 8257 147548 ns/op 373 B/op 9 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=64/tags=1-8 5245 207383 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=64/tags=2-8 6115 179041 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=64/tags=4-8 6831 164377 ns/op 367 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=64/tags=8-8 7378 152743 ns/op 365 B/op 8 allocs/op +BenchmarkMClock/impl=mclock/limit=100000.0/reservation=10000.0/parallelism=64/tags=16-8 7837 148694 ns/op 366 B/op 8 allocs/op PASS -ok git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling 195.781s +ok git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling 194.532s diff --git a/scheduling/mclock_test.go b/scheduling/mclock_test.go index 7f0edc8..a0b0bd3 100644 --- a/scheduling/mclock_test.go +++ b/scheduling/mclock_test.go @@ -79,14 +79,17 @@ func TestMClockSharesScheduling(t *testing.T) { var _ clock = &noopClock{} type noopClock struct { - v float64 + v float64 + runAtValue *float64 } func (n *noopClock) now() float64 { return n.v } -func (n *noopClock) runAt(ts float64, f func()) {} +func (n *noopClock) runAt(ts float64, f func()) { + n.runAtValue = &ts +} func (n *noopClock) close() {} @@ -457,3 +460,36 @@ func (q *MClock) waitingCount() int { return q.sharesQueue.Len() } + +func TestMClockTimeBasedSchedule(t *testing.T) { + t.Parallel() + limit := 1.0 // 1 request per second allowed + cl := &noopClock{v: float64(1.5)} + q, err := NewMClock(100, math.MaxUint64, map[string]TagInfo{ + "class1": {Shares: 1, Limit: &limit}, + }, 100) + require.NoError(t, err) + defer q.Close() + q.clock = cl + + running := make(chan struct{}) + checked := make(chan struct{}) + eg, ctx := errgroup.WithContext(context.Background()) + eg.Go(func() error { + release, err := q.RequestArrival(ctx, "class1") + require.NoError(t, err) + defer release() + close(running) + <-checked + return nil + }) + + <-running + // request must be scheduled at 2.0 + _, _, err = q.pushRequest("class1") + require.NoError(t, err) + require.NotNil(t, cl.runAtValue) + require.Equal(t, cl.v+1.0/limit, *cl.runAtValue) + close(checked) + require.NoError(t, eg.Wait()) +} From 47559a8d1653578db72efccb77dc1b93ea0cecf6 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 27 Jan 2025 13:45:00 +0300 Subject: [PATCH 08/29] [#1] mclock: Refactor: split code between files Signed-off-by: Dmitrii Stepanov --- scheduling/clock.go | 81 ++++++++++++++++++++ scheduling/mclock.go | 176 ------------------------------------------- scheduling/queue.go | 100 ++++++++++++++++++++++++ 3 files changed, 181 insertions(+), 176 deletions(-) create mode 100644 scheduling/clock.go create mode 100644 scheduling/queue.go diff --git a/scheduling/clock.go b/scheduling/clock.go new file mode 100644 index 0000000..99e6075 --- /dev/null +++ b/scheduling/clock.go @@ -0,0 +1,81 @@ +package scheduling + +import ( + "sync" + "time" +) + +type clock interface { + now() float64 + runAt(ts float64, f func()) + close() +} + +type scheduleInfo struct { + ts float64 + f func() +} + +type systemClock struct { 
+ since time.Time + schedule chan scheduleInfo + wg sync.WaitGroup +} + +func newSystemClock() *systemClock { + c := &systemClock{ + since: time.Now(), + schedule: make(chan scheduleInfo), + } + c.start() + return c +} + +func (c *systemClock) now() float64 { + return time.Since(c.since).Seconds() +} + +func (c *systemClock) runAt(ts float64, f func()) { + c.schedule <- scheduleInfo{ts: ts, f: f} +} + +func (c *systemClock) close() { + close(c.schedule) + c.wg.Wait() +} + +func (c *systemClock) start() { + c.wg.Add(1) + go func() { + defer c.wg.Done() + t := time.NewTimer(time.Hour) + var f func() + for { + select { + case <-t.C: + if f != nil { + f() + f = nil + } + case s, ok := <-c.schedule: + if !ok { + return + } + now := c.now() + if now >= s.ts { + s.f() + f = nil + continue + } + if !t.Stop() { + select { + case <-t.C: + default: + } + } + t.Reset(time.Duration((s.ts - now) * 1e9)) + f = s.f + } + } + }() +} diff --git a/scheduling/mclock.go b/scheduling/mclock.go index 5967d2d..4cd6ff1 100644 --- a/scheduling/mclock.go +++ b/scheduling/mclock.go @@ -6,7 +6,6 @@ import ( "errors" "math" "sync" - "time" ) const ( @@ -22,15 +21,6 @@ var ( ErrInvalidRunLimit = errors.New("invalid run limit: must be greater than zero") ) -type queueItem interface { - ts() float64 - setIndex(idx int) -} - -type queue struct { - items []queueItem -} - type request struct { tag string ts float64 @@ -48,12 +38,6 @@ type request struct { canceled chan struct{} } -type clock interface { - now() float64 - runAt(ts float64, f func()) - close() -} - // ReleaseFunc is the type of function that should be called after the request is completed. type ReleaseFunc func() @@ -424,163 +408,3 @@ func assertIndexInvalid(r *request) { panic("readyIdx is not -1") } } - -// Len implements heap.Interface. -func (q *queue) Len() int { - return len(q.items) -} - -// Less implements heap.Interface. -func (q *queue) Less(i int, j int) bool { - return q.items[i].ts() < q.items[j].ts() -} - -// Pop implements heap.Interface. -func (q *queue) Pop() any { - n := len(q.items) - item := q.items[n-1] - q.items[n-1] = nil - q.items = q.items[0 : n-1] - item.setIndex(invalidIndex) - return item -} - -// Push implements heap.Interface. -func (q *queue) Push(x any) { - it := x.(queueItem) - it.setIndex(q.Len()) - q.items = append(q.items, it) -} - -// Swap implements heap.Interface. 
-func (q *queue) Swap(i int, j int) { - q.items[i], q.items[j] = q.items[j], q.items[i] - q.items[i].setIndex(i) - q.items[j].setIndex(j) -} - -var _ queueItem = &reservationMQueueItem{} - -type reservationMQueueItem struct { - r *request -} - -func (i *reservationMQueueItem) ts() float64 { - return i.r.reservation -} - -func (i *reservationMQueueItem) setIndex(idx int) { - i.r.reservationIdx = idx -} - -var _ queueItem = &limitMQueueItem{} - -type limitMQueueItem struct { - r *request -} - -func (i *limitMQueueItem) ts() float64 { - return i.r.limit -} - -func (i *limitMQueueItem) setIndex(idx int) { - i.r.limitIdx = idx -} - -var _ queueItem = &sharesMQueueItem{} - -type sharesMQueueItem struct { - r *request -} - -func (i *sharesMQueueItem) ts() float64 { - return i.r.shares -} - -func (i *sharesMQueueItem) setIndex(idx int) { - i.r.sharesIdx = idx -} - -var _ queueItem = &readyMQueueItem{} - -type readyMQueueItem struct { - r *request -} - -func (i *readyMQueueItem) ts() float64 { - return i.r.shares -} - -func (i *readyMQueueItem) setIndex(idx int) { - i.r.readyIdx = idx -} - -type scheduleInfo struct { - ts float64 - f func() -} - -type systemClock struct { - since time.Time - schedule chan scheduleInfo - wg sync.WaitGroup -} - -func newSystemClock() *systemClock { - c := &systemClock{ - since: time.Now(), - schedule: make(chan scheduleInfo), - } - c.start() - return c -} - -func (c *systemClock) now() float64 { - return time.Since(c.since).Seconds() -} - -func (c *systemClock) runAt(ts float64, f func()) { - c.schedule <- scheduleInfo{ts: ts, f: f} -} - -func (c *systemClock) close() { - close(c.schedule) - c.wg.Wait() -} - -func (c *systemClock) start() { - c.wg.Add(1) - go func() { - defer c.wg.Done() - t := time.NewTimer(time.Hour) - var f func() - for { - select { - case <-t.C: - if f != nil { - f() - f = nil - } - t.Reset(time.Hour) - case s, ok := <-c.schedule: - if !ok { - return - } - now := c.now() - if now >= s.ts { - s.f() - f = nil - continue - } - if !t.Stop() { - select { - case <-t.C: - default: - } - } - t.Reset(time.Duration((s.ts - now) * 1e9)) - f = s.f - } - } - }() -} diff --git a/scheduling/queue.go b/scheduling/queue.go new file mode 100644 index 0000000..12dd44a --- /dev/null +++ b/scheduling/queue.go @@ -0,0 +1,100 @@ +package scheduling + +type queueItem interface { + ts() float64 + setIndex(idx int) +} + +type queue struct { + items []queueItem +} + +// Len implements heap.Interface. +func (q *queue) Len() int { + return len(q.items) +} + +// Less implements heap.Interface. +func (q *queue) Less(i int, j int) bool { + return q.items[i].ts() < q.items[j].ts() +} + +// Pop implements heap.Interface. +func (q *queue) Pop() any { + n := len(q.items) + item := q.items[n-1] + q.items[n-1] = nil + q.items = q.items[0 : n-1] + item.setIndex(invalidIndex) + return item +} + +// Push implements heap.Interface. +func (q *queue) Push(x any) { + it := x.(queueItem) + it.setIndex(q.Len()) + q.items = append(q.items, it) +} + +// Swap implements heap.Interface. 
+func (q *queue) Swap(i int, j int) { + q.items[i], q.items[j] = q.items[j], q.items[i] + q.items[i].setIndex(i) + q.items[j].setIndex(j) +} + +var _ queueItem = &reservationMQueueItem{} + +type reservationMQueueItem struct { + r *request +} + +func (i *reservationMQueueItem) ts() float64 { + return i.r.reservation +} + +func (i *reservationMQueueItem) setIndex(idx int) { + i.r.reservationIdx = idx +} + +var _ queueItem = &limitMQueueItem{} + +type limitMQueueItem struct { + r *request +} + +func (i *limitMQueueItem) ts() float64 { + return i.r.limit +} + +func (i *limitMQueueItem) setIndex(idx int) { + i.r.limitIdx = idx +} + +var _ queueItem = &sharesMQueueItem{} + +type sharesMQueueItem struct { + r *request +} + +func (i *sharesMQueueItem) ts() float64 { + return i.r.shares +} + +func (i *sharesMQueueItem) setIndex(idx int) { + i.r.sharesIdx = idx +} + +var _ queueItem = &readyMQueueItem{} + +type readyMQueueItem struct { + r *request +} + +func (i *readyMQueueItem) ts() float64 { + return i.r.shares +} + +func (i *readyMQueueItem) setIndex(idx int) { + i.r.readyIdx = idx +} From f1cb5b40d52b74bddc1194c676d4b65432972c55 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 27 Jan 2025 16:46:38 +0300 Subject: [PATCH 09/29] [#1] mclock: Use `time.Duration` for idle timeout Signed-off-by: Dmitrii Stepanov --- scheduling/mclock.go | 7 ++++--- scheduling/mclock_bench_test.go | 2 +- scheduling/mclock_test.go | 10 +++++----- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/scheduling/mclock.go b/scheduling/mclock.go index 4cd6ff1..d50e60e 100644 --- a/scheduling/mclock.go +++ b/scheduling/mclock.go @@ -6,6 +6,7 @@ import ( "errors" "math" "sync" + "time" ) const ( @@ -74,11 +75,11 @@ type MClock struct { // waitLimit maximum allowed count of waiting requests // for tags specified by tagInfo. The value of idleTimeout defines // the difference between the current time and the time of -// the previous request in seconds, at which the tag considered idle. +// the previous request, at which the tag considered idle. // If idleTimeout is negative, it means that there is no idle tags allowed. // If waitLimit equals zero, it means that there is no limit on the // number of waiting requests. 
-func NewMClock(runLimit, waitLimit uint64, tagInfo map[string]TagInfo, idleTimeout float64) (*MClock, error) { +func NewMClock(runLimit, waitLimit uint64, tagInfo map[string]TagInfo, idleTimeout time.Duration) (*MClock, error) { if err := validateParams(runLimit, tagInfo); err != nil { return nil, err } @@ -86,7 +87,7 @@ func NewMClock(runLimit, waitLimit uint64, tagInfo map[string]TagInfo, idleTimeo runLimit: runLimit, waitLimit: int(waitLimit), clock: newSystemClock(), - idleTimeout: idleTimeout, + idleTimeout: idleTimeout.Seconds(), tagInfo: tagInfo, reservationQueue: &queue{}, diff --git a/scheduling/mclock_bench_test.go b/scheduling/mclock_bench_test.go index f18f421..09989e6 100644 --- a/scheduling/mclock_bench_test.go +++ b/scheduling/mclock_bench_test.go @@ -56,7 +56,7 @@ func BenchmarkMClock(b *testing.B) { tagInfos["tag"+strconv.FormatInt(int64(tag), 10)] = TagInfo{Share: 50, LimitIOPS: limit, ReservedIOPS: reservation} } - mClockQ, _ := NewMClock(math.MaxUint64, math.MaxUint64, tagInfos, math.MaxFloat64) + mClockQ, _ := NewMClock(math.MaxUint64, math.MaxUint64, tagInfos, time.Hour) resStr := "no" if reservation != nil { diff --git a/scheduling/mclock_test.go b/scheduling/mclock_test.go index a0b0bd3..9cebe05 100644 --- a/scheduling/mclock_test.go +++ b/scheduling/mclock_test.go @@ -275,7 +275,7 @@ func TestMClockReservationScheduling(t *testing.T) { func TestMClockIdleTag(t *testing.T) { t.Parallel() reqCount := 100 - idleTimeout := 2.0 + idleTimeout := 2 * time.Second cl := &noopClock{} q, err := NewMClock(1, math.MaxUint64, map[string]TagInfo{ "class1": {Share: 1}, @@ -287,7 +287,7 @@ func TestMClockIdleTag(t *testing.T) { var requests []*request tag := "class1" for i := 0; i < reqCount/2; i++ { - cl.v += idleTimeout / 2 + cl.v += idleTimeout.Seconds() / 2 req, _, err := q.pushRequest(tag) require.NoError(t, err) requests = append(requests, req) @@ -295,7 +295,7 @@ func TestMClockIdleTag(t *testing.T) { // class1 requests have shares [1.0; 2.0; 3.0; ... ] - cl.v += 2 * idleTimeout + cl.v += 2 * idleTimeout.Seconds() tag = "class2" req, _, err := q.pushRequest(tag) @@ -424,7 +424,7 @@ func TestMClockParameterValidation(t *testing.T) { require.NoError(t, err) _, err = NewMClock(1, 1, map[string]TagInfo{ "class1": {Share: 1}, - }, float64(0)) + }, 0) require.NoError(t, err) negativeValue := -1.0 zeroValue := float64(0) @@ -466,7 +466,7 @@ func TestMClockTimeBasedSchedule(t *testing.T) { limit := 1.0 // 1 request per second allowed cl := &noopClock{v: float64(1.5)} q, err := NewMClock(100, math.MaxUint64, map[string]TagInfo{ - "class1": {Shares: 1, Limit: &limit}, + "class1": {Share: 1, LimitIOPS: &limit}, }, 100) require.NoError(t, err) defer q.Close() From d8663f1a74d810e03090813a9d92fcc7adf4f36f Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 28 Jan 2025 10:43:41 +0300 Subject: [PATCH 10/29] [#1] mclock: Fix possible deadlock There is a possible call-chain `scheduleRequest()` -> `runAt()` -> `scheduleRequest()`, so the second `scheduleRequest()` may block on the mutex held by the first `scheduleRequest()`.
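To make the failure mode concrete, here is a self-contained toy model of the cycle (a sketch only: the `clockLoop`/`sched` names are invented for this note and condense `systemClock` and `MClock`; the patch's actual fix is the select/default in `runAt()` shown in the diff below, which drops the send when the clock goroutine is busy, since the fired-timer path will call `runAt()` again):

package main

import (
    "sync"
    "time"
)

// Toy model of the bug (names are illustrative, not the real API).
type clockLoop struct {
    ch chan func() // unbuffered, like systemClock.schedule
}

// runAt models the old systemClock.runAt: an unconditionally blocking send.
func (c *clockLoop) runAt(f func()) {
    c.ch <- f
}

type sched struct {
    mu sync.Mutex
    cl *clockLoop
}

// schedule models MClock.scheduleRequest: it re-arms the clock while
// holding the scheduler mutex.
func (s *sched) schedule() {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.cl.runAt(s.schedule)
}

func main() {
    s := &sched{cl: &clockLoop{ch: make(chan func())}}
    go func() { // the clock goroutine
        for f := range s.cl.ch {
            f() // runs schedule(), whose nested send to ch blocks:
        } //        only this goroutine ever receives from ch
    }()
    s.schedule()                      // arm the first callback
    time.Sleep(10 * time.Millisecond) // let the clock goroutine run it
    s.schedule()                      // blocks on mu; the runtime reports
                                      // "all goroutines are asleep - deadlock!"
}

With the non-blocking send in place, the nested `runAt()` simply drops its message and the already-fired timer path re-arms scheduling, so the cycle can no longer block.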
Signed-off-by: Dmitrii Stepanov --- scheduling/clock.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/scheduling/clock.go b/scheduling/clock.go index 99e6075..9fe66bd 100644 --- a/scheduling/clock.go +++ b/scheduling/clock.go @@ -36,7 +36,10 @@ func (c *systemClock) now() float64 { } func (c *systemClock) runAt(ts float64, f func()) { - c.schedule <- scheduleInfo{ts: ts, f: f} + select { + case c.schedule <- scheduleInfo{ts: ts, f: f}: + default: // timer fired, scheduleRequest will call runAt again + } } func (c *systemClock) close() { @@ -48,7 +51,8 @@ func (c *systemClock) start() { c.wg.Add(1) go func() { defer c.wg.Done() - t := time.NewTimer(time.Hour) + t := time.NewTimer(0) + <-t.C var f func() for { select { @@ -61,11 +65,10 @@ func (c *systemClock) start() { if !ok { return } + var d time.Duration now := c.now() - if now >= s.ts { - s.f() - f = nil - continue + if now < s.ts { + d = time.Duration((s.ts - now) * 1e9) } if !t.Stop() { select { @@ -73,7 +76,7 @@ func (c *systemClock) start() { default: } } - t.Reset(time.Duration((s.ts - now) * 1e9)) + t.Reset(d) f = s.f } } From 9a48a50220eae676b61a72c5cc4199100d96f901 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 28 Jan 2025 11:23:07 +0300 Subject: [PATCH 11/29] [#1] mclock: Refactor `scheduleRequest` Split to `scheduleRequest` and `scheduleRequestUnsafe`. Signed-off-by: Dmitrii Stepanov --- scheduling/mclock.go | 18 ++++++++++-------- scheduling/mclock_test.go | 10 +++++----- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/scheduling/mclock.go b/scheduling/mclock.go index d50e60e..a38f6f3 100644 --- a/scheduling/mclock.go +++ b/scheduling/mclock.go @@ -236,7 +236,7 @@ func (q *MClock) pushRequest(tag string) (*request, ReleaseFunc, error) { } heap.Push(q.sharesQueue, &sharesMQueueItem{r: r}) heap.Push(q.limitQueue, &limitMQueueItem{r: r}) - q.scheduleRequest(true) + q.scheduleRequestUnsafe() return r, q.requestCompleted, nil } @@ -261,12 +261,14 @@ func (q *MClock) adjustTags(now float64, idleTag string) { } } -func (q *MClock) scheduleRequest(lockTaken bool) { - if !lockTaken { - q.mtx.Lock() - defer q.mtx.Unlock() - } +func (q *MClock) scheduleRequest() { + q.mtx.Lock() + defer q.mtx.Unlock() + q.scheduleRequestUnsafe() +} + +func (q *MClock) scheduleRequestUnsafe() { if q.inProgress >= q.runLimit { return } @@ -297,7 +299,7 @@ func (q *MClock) setNextScheduleTimer(now float64) { if q.timeBasedScheduleTs > nextTs { q.clock.runAt(nextTs, func() { - q.scheduleRequest(false) + q.scheduleRequest() }) q.timeBasedScheduleTs = nextTs } @@ -392,7 +394,7 @@ func (q *MClock) requestCompleted() { panic("invalid requests count") } q.inProgress-- - q.scheduleRequest(true) + q.scheduleRequestUnsafe() } func assertIndexInvalid(r *request) { diff --git a/scheduling/mclock_test.go b/scheduling/mclock_test.go index 9cebe05..3aa261f 100644 --- a/scheduling/mclock_test.go +++ b/scheduling/mclock_test.go @@ -149,7 +149,7 @@ func TestMClockLimitScheduling(t *testing.T) { releases = append(releases, release) } - q.scheduleRequest(false) + q.scheduleRequest() for _, req := range requests { select { @@ -172,7 +172,7 @@ func TestMClockLimitScheduling(t *testing.T) { releases[i]() }() } - q.scheduleRequest(false) + q.scheduleRequest() wg.Wait() // Requests must be scheduled as class1->class1->class2->class1->class1->class2..., @@ -235,7 +235,7 @@ func TestMClockReservationScheduling(t *testing.T) { releases = append(releases, release) } - q.scheduleRequest(false) + q.scheduleRequest() 
for _, req := range requests { select { @@ -246,7 +246,7 @@ func TestMClockReservationScheduling(t *testing.T) { } cl.v = 1.00001 // 1s elapsed - q.scheduleRequest(false) + q.scheduleRequest() var result []string for i, req := range requests { @@ -264,7 +264,7 @@ func TestMClockReservationScheduling(t *testing.T) { } cl.v = math.MaxFloat64 - q.scheduleRequest(false) + q.scheduleRequest() require.Equal(t, 0, q.readyQueue.Len()) require.Equal(t, 0, q.sharesQueue.Len()) From f4d8ebf13db79b7a3f6eba60ddca681c023d55fc Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 28 Jan 2025 11:35:54 +0300 Subject: [PATCH 12/29] [#1] mclock: Add assert package Signed-off-by: Dmitrii Stepanov --- internal/assert/cond.go | 9 +++++++++ scheduling/mclock.go | 34 ++++++++++------------------------ 2 files changed, 19 insertions(+), 24 deletions(-) create mode 100644 internal/assert/cond.go diff --git a/internal/assert/cond.go b/internal/assert/cond.go new file mode 100644 index 0000000..4a1b201 --- /dev/null +++ b/internal/assert/cond.go @@ -0,0 +1,9 @@ +package assert + +import "strings" + +func Cond(cond bool, details ...string) { + if !cond { + panic(strings.Join(details, " ")) + } +} diff --git a/scheduling/mclock.go b/scheduling/mclock.go index a38f6f3..82037d6 100644 --- a/scheduling/mclock.go +++ b/scheduling/mclock.go @@ -7,6 +7,8 @@ import ( "math" "sync" "time" + + "git.frostfs.info/TrueCloudLab/frostfs-qos/internal/assert" ) const ( @@ -172,9 +174,7 @@ func (q *MClock) dropRequest(req *request) { select { case <-req.scheduled: - if q.inProgress == 0 { - panic("invalid requests count") - } + assert.Cond(q.inProgress > 0, "invalid requests count") q.inProgress-- default: } @@ -199,9 +199,7 @@ func (q *MClock) pushRequest(tag string) (*request, ReleaseFunc, error) { return nil, nil, ErrMClockSchedulerUnknownTag } prev, ok := q.previous[tag] - if !ok { - panic("undefined previous: " + tag) - } + assert.Cond(ok, "undefined previous:", tag) if q.idleTimeout >= 0 && now-prev.ts > q.idleTimeout { // was inactive for q.idleTimeout q.adjustTags(now, tag) @@ -321,9 +319,7 @@ func (q *MClock) scheduleByLimitAndWeight(now float64) { q.removeFromQueues(next.r) tagInfo, ok := q.tagInfo[next.r.tag] - if !ok { - panic("unknown tag: " + next.r.tag) // must be checked on top level - } + assert.Cond(ok, "unknown tag:", next.r.tag) if tagInfo.ReservedIOPS != nil && hadReservation { var updated bool for _, i := range q.reservationQueue.items { @@ -390,24 +386,14 @@ func (q *MClock) requestCompleted() { return } - if q.inProgress == 0 { - panic("invalid requests count") - } + assert.Cond(q.inProgress > 0, "invalid requests count") q.inProgress-- q.scheduleRequestUnsafe() } func assertIndexInvalid(r *request) { - if r.limitIdx != invalidIndex { - panic("limitIdx is not -1") - } - if r.sharesIdx != invalidIndex { - panic("sharesIdx is not -1") - } - if r.reservationIdx != invalidIndex { - panic("reservationIdx is not -1") - } - if r.readyIdx != invalidIndex { - panic("readyIdx is not -1") - } + assert.Cond(r.limitIdx == invalidIndex, "limitIdx is not -1") + assert.Cond(r.sharesIdx == invalidIndex, "sharesIdx is not -1") + assert.Cond(r.reservationIdx == invalidIndex, "reservationIdx is not -1") + assert.Cond(r.readyIdx == invalidIndex, "readyIdx is not -1") } From 1fb8b137c54310f46f41001221f919a43d720964 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 21 Jan 2025 09:56:13 +0300 Subject: [PATCH 13/29] [#2] tagging: Add grpc and context methods Signed-off-by: Dmitrii Stepanov --- go.mod | 9 +++- go.sum | 42 
++++++++++++++++++ tagging/context.go | 21 +++++++++ tagging/context_test.go | 23 ++++++++++ tagging/grpc.go | 96 +++++++++++++++++++++++++++++++++++++++++ 5 files changed, 190 insertions(+), 1 deletion(-) create mode 100644 tagging/context.go create mode 100644 tagging/context_test.go create mode 100644 tagging/grpc.go diff --git a/go.mod b/go.mod index f3e6160..35f0342 100644 --- a/go.mod +++ b/go.mod @@ -4,11 +4,18 @@ go 1.22 require ( github.com/stretchr/testify v1.9.0 - golang.org/x/sync v0.10.0 + google.golang.org/grpc v1.69.2 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/net v0.30.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/text v0.19.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/protobuf v1.35.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) + +require golang.org/x/sync v0.10.0 diff --git a/go.sum b/go.sum index 8c93871..7f2985d 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,47 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/sys v0.26.0 
h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= +google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= diff --git a/tagging/context.go b/tagging/context.go new file mode 100644 index 0000000..3b6c113 --- /dev/null +++ b/tagging/context.go @@ -0,0 +1,21 @@ +package tagging + +import "context" + +type tagContextKeyType struct{} + +var currentTagKey = tagContextKeyType{} + +func ContextWithIOTag(parent context.Context, ioTag string) context.Context { + return context.WithValue(parent, currentTagKey, ioTag) +} + +func IOTagFromContext(ctx context.Context) (string, bool) { + if ctx == nil { + panic("context must be non nil") + } + if tag, ok := ctx.Value(currentTagKey).(string); ok { + return tag, true + } + return "", false +} diff --git a/tagging/context_test.go b/tagging/context_test.go new file mode 100644 index 0000000..b13b253 --- /dev/null +++ b/tagging/context_test.go @@ -0,0 +1,23 @@ +package tagging + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestContext(t *testing.T) { + ctx := context.Background() + tag, ok := IOTagFromContext(ctx) + require.False(t, ok) + require.Equal(t, "", tag) + ctx = ContextWithIOTag(ctx, "tag1") + tag, ok = IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, "tag1", tag) + ctx = ContextWithIOTag(ctx, "tag2") + tag, ok = IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, "tag2", tag) +} diff --git a/tagging/grpc.go b/tagging/grpc.go new file mode 100644 index 0000000..5e255dd --- /dev/null +++ b/tagging/grpc.go @@ -0,0 +1,96 @@ +package tagging + +import ( + "context" + + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +const ( + ioTagHeader = "x-frostfs-io-tag" +) + +// NewUnaryClientInteceptor creates new gRPC unary interceptor to set an IO tag to gRPC metadata. 
+func NewUnaryClientInteceptor() grpc.UnaryClientInterceptor { + return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + return invoker(setIOTagToGRPCMetadata(ctx), method, req, reply, cc, opts...) + } +} + +// NewStreamClientInterceptor creates new gRPC stream interceptor to set an IO tag to gRPC metadata. +func NewStreamClientInterceptor() grpc.StreamClientInterceptor { + return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + return streamer(setIOTagToGRPCMetadata(ctx), desc, cc, method, opts...) + } +} + +// NewUnaryServerInterceptor creates new gRPC unary interceptor to extract an IO tag from gRPC metadata. +func NewUnaryServerInterceptor() grpc.UnaryServerInterceptor { + return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + return handler(extractIOTagFromGRPCMetadata(ctx), req) + } +} + +// NewStreamServerInterceptor creates new gRPC stream interceptor to extract an IO tag from gRPC metadata. +func NewStreamServerInterceptor() grpc.StreamServerInterceptor { + return func(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + return handler(srv, &serverStream{origin: ss}) + } +} + +func setIOTagToGRPCMetadata(ctx context.Context) context.Context { + ioTag, ok := IOTagFromContext(ctx) + if !ok { + return ctx + } + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + md = metadata.MD{} + } + md.Set(ioTagHeader, ioTag) + return metadata.NewOutgoingContext(ctx, md) +} + +func extractIOTagFromGRPCMetadata(ctx context.Context) context.Context { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return ctx + } + + values := md.Get(ioTagHeader) + if len(values) > 0 { + return ContextWithIOTag(ctx, values[0]) + } + return ctx +} + +var _ grpc.ServerStream = &serverStream{} + +type serverStream struct { + origin grpc.ServerStream +} + +func (s *serverStream) Context() context.Context { + return extractIOTagFromGRPCMetadata(s.origin.Context()) +} + +func (s *serverStream) RecvMsg(m any) error { + return s.origin.RecvMsg(m) +} + +func (s *serverStream) SendHeader(md metadata.MD) error { + return s.origin.SendHeader(md) +} + +func (s *serverStream) SendMsg(m any) error { + return s.origin.SendMsg(m) +} + +func (s *serverStream) SetHeader(md metadata.MD) error { + return s.origin.SetHeader(md) +} + +func (s *serverStream) SetTrailer(md metadata.MD) { + s.origin.SetTrailer(md) +} From 128933faded216d0d04bc8535a7d94106c4ec848 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 21 Jan 2025 10:28:02 +0300 Subject: [PATCH 14/29] [#2] Add CODEOWNERS Signed-off-by: Dmitrii Stepanov --- CODEOWNERS | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 CODEOWNERS diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 0000000..b6fa647 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,3 @@ +.* @fyrchik +.forgejo/.* @potyarkin +Makefile @potyarkin From cfbca7fa1dfef78bc844bb9e67c03fe55731583a Mon Sep 17 00:00:00 2001 From: Vitaliy Potyarkin Date: Tue, 28 Jan 2025 17:43:16 +0300 Subject: [PATCH 15/29] [#3] govulncheck: Use patch release with security fixes https://go.dev/doc/devel/release#go1.23.minor Signed-off-by: Vitaliy Potyarkin --- .forgejo/workflows/vulncheck.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.forgejo/workflows/vulncheck.yml
b/.forgejo/workflows/vulncheck.yml index cf15005..8a5a818 100644 --- a/.forgejo/workflows/vulncheck.yml +++ b/.forgejo/workflows/vulncheck.yml @@ -18,7 +18,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: '1.23' + go-version: '1.23.5' - name: Install govulncheck run: go install golang.org/x/vuln/cmd/govulncheck@latest From 30e83428fdf113372d6fe3aaf453f5fe322222eb Mon Sep 17 00:00:00 2001 From: Vitaliy Potyarkin Date: Mon, 10 Feb 2025 18:11:18 +0300 Subject: [PATCH 16/29] [#5] govulncheck: Fix minor update problems for good Signed-off-by: Vitaliy Potyarkin --- .forgejo/workflows/vulncheck.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.forgejo/workflows/vulncheck.yml b/.forgejo/workflows/vulncheck.yml index 8a5a818..140434d 100644 --- a/.forgejo/workflows/vulncheck.yml +++ b/.forgejo/workflows/vulncheck.yml @@ -18,7 +18,8 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: '1.23.5' + go-version: '1.23' + check-latest: true - name: Install govulncheck run: go install golang.org/x/vuln/cmd/govulncheck@latest From 59fb93fb234f456bec510e29bdc40ab00f76a4c3 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Tue, 4 Feb 2025 17:32:01 +0300 Subject: [PATCH 17/29] [#4] limiting: Add limiter Signed-off-by: Aleksey Savchuk --- limiting/limiter.go | 75 +++++++++++++++++ limiting/limiter_test.go | 138 ++++++++++++++++++++++++++++++++ limiting/semaphore/semaphore.go | 27 +++++++ 3 files changed, 240 insertions(+) create mode 100644 limiting/limiter.go create mode 100644 limiting/limiter_test.go create mode 100644 limiting/semaphore/semaphore.go diff --git a/limiting/limiter.go b/limiting/limiter.go new file mode 100644 index 0000000..4d29a71 --- /dev/null +++ b/limiting/limiter.go @@ -0,0 +1,75 @@ +package limiting + +import ( + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting/semaphore" +) + +type ReleaseFunc func() + +type Limiter interface { + // Acquire attempts to reserve a slot without blocking. + // + // Returns a release function and true if successful. The function must be + // called to release the limiter. The function must be called exactly once. + // Calling the function more than once will cause incorrect behavior of the + // limiter. + // + // Returns nil and false if it fails. + // + // If the key was not defined in the limiter, no limit is applied. + Acquire(key string) (ReleaseFunc, bool) +} + +type SemaphoreLimiter struct { + m map[string]*semaphore.Semaphore +} + +// KeyLimit defines a concurrency limit for a set of keys. +// +// All keys of one set share the same limit. +// Keys of different sets have separate limits. +// +// Sets must not overlap.
+type KeyLimit struct { + Keys []string + Limit int64 +} + +func NewSemaphoreLimiter(limits []KeyLimit) (*SemaphoreLimiter, error) { + lr := SemaphoreLimiter{make(map[string]*semaphore.Semaphore)} + for _, limit := range limits { + if limit.Limit < 0 { + return nil, fmt.Errorf("invalid limit %d", limit.Limit) + } + sem := semaphore.NewSemaphore(limit.Limit) + + if err := lr.addLimit(&limit, sem); err != nil { + return nil, err + } + } + return &lr, nil +} + +func (lr *SemaphoreLimiter) addLimit(limit *KeyLimit, sem *semaphore.Semaphore) error { + for _, key := range limit.Keys { + if _, exists := lr.m[key]; exists { + return fmt.Errorf("duplicate key %q", key) + } + lr.m[key] = sem + } + return nil +} + +func (lr *SemaphoreLimiter) Acquire(key string) (ReleaseFunc, bool) { + sem, ok := lr.m[key] + if !ok { + return func() {}, true + } + + if ok := sem.Acquire(); ok { + return func() { sem.Release() }, true + } + return nil, false +} diff --git a/limiting/limiter_test.go b/limiting/limiter_test.go new file mode 100644 index 0000000..c6087f1 --- /dev/null +++ b/limiting/limiter_test.go @@ -0,0 +1,138 @@ +package limiting_test + +import ( + "sync" + "sync/atomic" + "testing" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" + "github.com/stretchr/testify/require" +) + +const ( + operationDuration = 10 * time.Millisecond + operationCount = 64 +) + +type testCase struct { + keys []string + limit int64 + withoutLimit bool + failCount atomic.Int64 +} + +func TestLimiter(t *testing.T) { + testLimiter(t, func(kl []limiting.KeyLimit) (limiting.Limiter, error) { + return limiting.NewSemaphoreLimiter(kl) + }) +} + +func testLimiter(t *testing.T, getLimiter func([]limiting.KeyLimit) (limiting.Limiter, error)) { + t.Run("duplicate key", func(t *testing.T) { + _, err := getLimiter([]limiting.KeyLimit{ + {[]string{"A", "B"}, 10}, + {[]string{"B", "C"}, 10}, + }) + require.Error(t, err) + }) + + testCases := []*testCase{ + {keys: []string{"A"}, limit: operationCount / 4}, + {keys: []string{"B"}, limit: operationCount / 2}, + {keys: []string{"C", "D"}, limit: operationCount / 4}, + {keys: []string{"E"}, limit: 2 * operationCount}, + {keys: []string{"F"}, withoutLimit: true}, + } + + lr, err := getLimiter(getLimits(testCases)) + require.NoError(t, err) + + tasks := createTestTasks(testCases, lr) + + t.Run("first run", func(t *testing.T) { + executeTasks(tasks...) + verifyResults(t, testCases) + }) + + resetFailCounts(testCases) + + t.Run("repeated run", func(t *testing.T) { + executeTasks(tasks...) 
+ verifyResults(t, testCases) + }) +} + +func getLimits(testCases []*testCase) []limiting.KeyLimit { + var limits []limiting.KeyLimit + for _, tc := range testCases { + if tc.withoutLimit { + continue + } + limits = append(limits, limiting.KeyLimit{ + Keys: tc.keys, + Limit: int64(tc.limit), + }) + } + return limits +} + +func createTestTasks(testCases []*testCase, lr limiting.Limiter) []func() { + var tasks []func() + for _, tc := range testCases { + for _, key := range tc.keys { + tasks = append(tasks, func() { + executeTaskN(operationCount, func() { acquireAndExecute(tc, lr, key) }) + }) + } + } + return tasks +} + +func acquireAndExecute(tc *testCase, lr limiting.Limiter, key string) { + release, ok := lr.Acquire(key) + if !ok { + tc.failCount.Add(1) + return + } + defer release() + time.Sleep(operationDuration) +} + +func executeTasks(tasks ...func()) { + var g sync.WaitGroup + + g.Add(len(tasks)) + for _, task := range tasks { + go func() { + defer g.Done() + task() + }() + } + g.Wait() +} + +func executeTaskN(N int, task func()) { + tasks := make([]func(), N) + for i := range N { + tasks[i] = task + } + executeTasks(tasks...) +} + +func verifyResults(t *testing.T, testCases []*testCase) { + for _, tc := range testCases { + var expectedFailCount int64 + if !tc.withoutLimit { + numKeys := int64(len(tc.keys)) + expectedFailCount = max(operationCount*numKeys-tc.limit, 0) + } + require.Equal(t, expectedFailCount, tc.failCount.Load()) + } +} + +func resetFailCounts(testCases []*testCase) { + for _, tc := range testCases { + tc.failCount.Store(0) + } +} diff --git a/limiting/semaphore/semaphore.go b/limiting/semaphore/semaphore.go new file mode 100644 index 0000000..c43dfc6 --- /dev/null +++ b/limiting/semaphore/semaphore.go @@ -0,0 +1,27 @@ +package semaphore + +import ( + "sync/atomic" +) + +type Semaphore struct { + count atomic.Int64 + limit int64 +} + +func NewSemaphore(size int64) *Semaphore { + return &Semaphore{limit: size} +} + +func (s *Semaphore) Acquire() bool { + v := s.count.Add(1) + if v > s.limit { + s.count.Add(-1) + return false + } + return true +} + +func (s *Semaphore) Release() { + s.count.Add(-1) +} From 356851eed3bf77e13c87bca9606935c8e7c58769 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Thu, 13 Feb 2025 14:25:50 +0300 Subject: [PATCH 18/29] [#4] limiting/semaphore: Add benchmark for semaphore Signed-off-by: Aleksey Savchuk --- limiting/semaphore/semaphore_bench.result | 26 ++++++ limiting/semaphore/semaphore_bench_test.go | 95 ++++++++++++++++++++++ 2 files changed, 121 insertions(+) create mode 100644 limiting/semaphore/semaphore_bench.result create mode 100644 limiting/semaphore/semaphore_bench_test.go diff --git a/limiting/semaphore/semaphore_bench.result b/limiting/semaphore/semaphore_bench.result new file mode 100644 index 0000000..5883c60 --- /dev/null +++ b/limiting/semaphore/semaphore_bench.result @@ -0,0 +1,26 @@ +goos: linux +goarch: amd64 +pkg: git.frostfs.info/TrueCloudLab/frostfs-qos/limiting/semaphore +cpu: 12th Gen Intel(R) Core(TM) i5-1235U +BenchmarkSemaphore/semaphore_size=1/lock_duration=0s-12 6113605 1964 ns/op 383.1 acquire-ns/op 203.5 release-ns/op 0.6892 success-rate +BenchmarkSemaphore/semaphore_size=1/lock_duration=1µs-12 5826655 2067 ns/op 382.0 acquire-ns/op 307.0 release-ns/op 0.1460 success-rate +BenchmarkSemaphore/semaphore_size=1/lock_duration=10µs-12 5977272 2033 ns/op 370.4 acquire-ns/op 321.4 release-ns/op 0.05408 success-rate +BenchmarkSemaphore/semaphore_size=1/lock_duration=100µs-12 5862900 2030 ns/op 365.6 acquire-ns/op 
343.1 release-ns/op 0.01242 success-rate +BenchmarkSemaphore/semaphore_size=10/lock_duration=0s-12 5637050 2173 ns/op 365.2 acquire-ns/op 261.7 release-ns/op 0.9765 success-rate +BenchmarkSemaphore/semaphore_size=10/lock_duration=1µs-12 5470316 2225 ns/op 390.4 acquire-ns/op 357.2 release-ns/op 0.9249 success-rate +BenchmarkSemaphore/semaphore_size=10/lock_duration=10µs-12 5584527 2134 ns/op 395.2 acquire-ns/op 339.0 release-ns/op 0.5409 success-rate +BenchmarkSemaphore/semaphore_size=10/lock_duration=100µs-12 5841032 2036 ns/op 369.4 acquire-ns/op 330.7 release-ns/op 0.1182 success-rate +BenchmarkSemaphore/semaphore_size=100/lock_duration=0s-12 5600013 2159 ns/op 369.9 acquire-ns/op 271.1 release-ns/op 0.9976 success-rate +BenchmarkSemaphore/semaphore_size=100/lock_duration=1µs-12 5323606 2280 ns/op 394.0 acquire-ns/op 368.9 release-ns/op 0.9697 success-rate +BenchmarkSemaphore/semaphore_size=100/lock_duration=10µs-12 5133394 2353 ns/op 405.8 acquire-ns/op 374.5 release-ns/op 0.9498 success-rate +BenchmarkSemaphore/semaphore_size=100/lock_duration=100µs-12 5238136 2303 ns/op 387.2 acquire-ns/op 362.2 release-ns/op 0.8749 success-rate +BenchmarkSemaphore/semaphore_size=1000/lock_duration=0s-12 5408720 2180 ns/op 367.6 acquire-ns/op 271.5 release-ns/op 0.9992 success-rate +BenchmarkSemaphore/semaphore_size=1000/lock_duration=1µs-12 5114854 2366 ns/op 407.9 acquire-ns/op 376.4 release-ns/op 0.9966 success-rate +BenchmarkSemaphore/semaphore_size=1000/lock_duration=10µs-12 4659454 2438 ns/op 412.2 acquire-ns/op 385.9 release-ns/op 0.9800 success-rate +BenchmarkSemaphore/semaphore_size=1000/lock_duration=100µs-12 4837894 2482 ns/op 401.7 acquire-ns/op 380.9 release-ns/op 0.9725 success-rate +BenchmarkSemaphore/semaphore_size=10000/lock_duration=0s-12 5403058 2188 ns/op 367.5 acquire-ns/op 273.1 release-ns/op 1.000 success-rate +BenchmarkSemaphore/semaphore_size=10000/lock_duration=1µs-12 5086929 2306 ns/op 390.6 acquire-ns/op 376.3 release-ns/op 1.000 success-rate +BenchmarkSemaphore/semaphore_size=10000/lock_duration=10µs-12 5059968 2378 ns/op 410.2 acquire-ns/op 384.5 release-ns/op 1.000 success-rate +BenchmarkSemaphore/semaphore_size=10000/lock_duration=100µs-12 4909206 2420 ns/op 408.4 acquire-ns/op 383.4 release-ns/op 1.000 success-rate +PASS +ok git.frostfs.info/TrueCloudLab/frostfs-qos/limiting/semaphore 284.895s diff --git a/limiting/semaphore/semaphore_bench_test.go b/limiting/semaphore/semaphore_bench_test.go new file mode 100644 index 0000000..f4837e8 --- /dev/null +++ b/limiting/semaphore/semaphore_bench_test.go @@ -0,0 +1,95 @@ +package semaphore_test + +import ( + "fmt" + "sync" + "testing" + "time" + + semaphores "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting/semaphore" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" +) + +const maxWorkers = 10_000_000 + +type benchmarkSemaphoreMetrics struct { + mu sync.Mutex + + acquireDuration, + releaseDuration time.Duration + + acquireCount, + releaseCount uint64 +} + +func (c *benchmarkSemaphoreMetrics) reportAcquire(duration time.Duration) { + c.mu.Lock() + defer c.mu.Unlock() + c.acquireDuration += duration + c.acquireCount += 1 +} + +func (c *benchmarkSemaphoreMetrics) reportRelease(duration time.Duration) { + c.mu.Lock() + defer c.mu.Unlock() + c.releaseDuration += duration + c.releaseCount += 1 +} + +func (c *benchmarkSemaphoreMetrics) getResults() (timePerAcquire, timePerRelease, successRate float64) { + timePerAcquire = float64(c.acquireDuration) / float64(c.acquireCount) + timePerRelease = 
float64(c.releaseDuration) / float64(c.releaseCount) + successRate = float64(c.releaseCount) / float64(c.acquireCount) + return +} + +func BenchmarkSemaphore(b *testing.B) { + sizes := []int64{1, 10, 100, 1000, 10000} + lockDurations := []time.Duration{0, time.Microsecond, 10 * time.Microsecond, 100 * time.Microsecond} + + for _, size := range sizes { + for _, lockDuration := range lockDurations { + name := fmt.Sprintf("semaphore_size=%d/lock_duration=%v", size, lockDuration) + b.Run(name, func(b *testing.B) { + benchmarkSemaphore(b, semaphores.NewSemaphore(size), lockDuration) + }) + } + } +} + +func benchmarkSemaphore(b *testing.B, sem *semaphores.Semaphore, lockDuration time.Duration) { + var m benchmarkSemaphoreMetrics + var g errgroup.Group + g.SetLimit(maxWorkers) + + for range b.N { + g.Go(func() error { + now := time.Now() + ok := sem.Acquire() + m.reportAcquire(time.Since(now)) + + if !ok { + return nil + } + + time.Sleep(lockDuration) + + now = time.Now() + sem.Release() + m.reportRelease(time.Since(now)) + + return nil + }) + } + require.NoError(b, g.Wait()) + + require.Equal(b, uint64(b.N), m.acquireCount) + require.LessOrEqual(b, m.releaseCount, m.acquireCount) + + timePerAcquire, timePerRelease, successRate := m.getResults() + + b.ReportMetric(timePerAcquire, "acquire-ns/op") + b.ReportMetric(timePerRelease, "release-ns/op") + b.ReportMetric(successRate, "success-rate") +} From cafa869fea3bef8c0c2ea784a1d6199610b7578c Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 26 Feb 2025 13:18:40 +0300 Subject: [PATCH 19/29] [#8] limiting: Make SemaphoreLimiter.Acquire() zero-alloc Previously, `Acquire` on an existing key did 1 allocation because `func() { sem.Release() }` was a closure capturing different variables. Signed-off-by: Evgenii Stratonikov --- limiting/limiter.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/limiting/limiter.go b/limiting/limiter.go index 4d29a71..0f550f8 100644 --- a/limiting/limiter.go +++ b/limiting/limiter.go @@ -69,7 +69,7 @@ func (lr *SemaphoreLimiter) Acquire(key string) (ReleaseFunc, bool) { } if ok := sem.Acquire(); ok { - return func() { sem.Release() }, true + return sem.Release, true } return nil, false } From 25102d1e1aa3e9232af803ce3cd0088c37263de3 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 18 Feb 2025 15:36:58 +0300 Subject: [PATCH 20/29] [#7] mclock: Add tag stat Signed-off-by: Dmitrii Stepanov --- scheduling/mclock.go | 46 +++++++++++- scheduling/mclock_test.go | 150 ++++++++++++++++++++++++++++++++++++++ scheduling/stat.go | 20 +++++ 3 files changed, 214 insertions(+), 2 deletions(-) create mode 100644 scheduling/stat.go diff --git a/scheduling/mclock.go b/scheduling/mclock.go index 82037d6..213b3cf 100644 --- a/scheduling/mclock.go +++ b/scheduling/mclock.go @@ -14,6 +14,7 @@ import ( const ( invalidIndex = -1 undefinedReservation float64 = -1.0 + minusOne = ^uint64(0) ) var ( @@ -60,6 +61,8 @@ type MClock struct { clock clock idleTimeout float64 tagInfo map[string]TagInfo + tagStat map[string]*Stat + stats []*Stat mtx sync.Mutex previous map[string]*request @@ -110,6 +113,16 @@ func NewMClock(runLimit, waitLimit uint64, tagInfo map[string]TagInfo, idleTimeo } result.previous = previous + result.tagStat = make(map[string]*Stat, len(tagInfo)) + result.stats = make([]*Stat, 0, len(tagInfo)) + for tag := range tagInfo { + s := &Stat{ + tag: tag, + } + result.tagStat[tag] = s + result.stats = append(result.stats, s) + } + return result, nil } @@ -150,6 +163,12 @@ func (q *MClock) Close() { } } +//
Stats returns per tag stat. +// Returned slice should not be modified. +func (q *MClock) Stats() []*Stat { + return q.stats +} + func validateParams(runLimit uint64, tagInfo map[string]TagInfo) error { if runLimit == 0 { return ErrInvalidRunLimit @@ -172,11 +191,16 @@ func (q *MClock) dropRequest(req *request) { q.mtx.Lock() defer q.mtx.Unlock() + s, ok := q.tagStat[req.tag] + assert.Cond(ok, "undefined stat tag:", req.tag) + select { case <-req.scheduled: assert.Cond(q.inProgress > 0, "invalid requests count") q.inProgress-- + s.inProgress.Add(minusOne) default: + s.pending.Add(minusOne) } q.removeFromQueues(req) @@ -234,9 +258,14 @@ func (q *MClock) pushRequest(tag string) (*request, ReleaseFunc, error) { } heap.Push(q.sharesQueue, &sharesMQueueItem{r: r}) heap.Push(q.limitQueue, &limitMQueueItem{r: r}) + + s, ok := q.tagStat[tag] + assert.Cond(ok, "undefined stat tag:", tag) + s.pending.Add(1) + q.scheduleRequestUnsafe() - return r, q.requestCompleted, nil + return r, func() { q.requestCompleted(tag) }, nil } func (q *MClock) adjustTags(now float64, idleTag string) { @@ -318,6 +347,10 @@ func (q *MClock) scheduleByLimitAndWeight(now float64) { } q.removeFromQueues(next.r) + s, ok := q.tagStat[next.r.tag] + assert.Cond(ok, "undefined stat tag:", next.r.tag) + s.pending.Add(minusOne) + tagInfo, ok := q.tagInfo[next.r.tag] assert.Cond(ok, "unknown tag:", next.r.tag) if tagInfo.ReservedIOPS != nil && hadReservation { @@ -342,6 +375,7 @@ func (q *MClock) scheduleByLimitAndWeight(now float64) { assertIndexInvalid(next.r) q.inProgress++ + s.inProgress.Add(1) close(next.r.scheduled) } } @@ -351,6 +385,10 @@ func (q *MClock) scheduleByReservation(now float64) { next := heap.Pop(q.reservationQueue).(*reservationMQueueItem) q.removeFromQueues(next.r) + s, ok := q.tagStat[next.r.tag] + assert.Cond(ok, "undefined stat tag:", next.r.tag) + s.pending.Add(minusOne) + select { case <-next.r.canceled: continue @@ -359,6 +397,7 @@ func (q *MClock) scheduleByReservation(now float64) { assertIndexInvalid(next.r) q.inProgress++ + s.inProgress.Add(1) close(next.r.scheduled) } } @@ -378,7 +417,7 @@ func (q *MClock) removeFromQueues(r *request) { } } -func (q *MClock) requestCompleted() { +func (q *MClock) requestCompleted(tag string) { q.mtx.Lock() defer q.mtx.Unlock() @@ -388,6 +427,9 @@ func (q *MClock) requestCompleted() { assert.Cond(q.inProgress > 0, "invalid requests count") q.inProgress-- + s, ok := q.tagStat[tag] + assert.Cond(ok, "undefined stat tag:", tag) + s.inProgress.Add(minusOne) q.scheduleRequestUnsafe() } diff --git a/scheduling/mclock_test.go b/scheduling/mclock_test.go index 3aa261f..90b3b5a 100644 --- a/scheduling/mclock_test.go +++ b/scheduling/mclock_test.go @@ -39,6 +39,21 @@ func TestMClockSharesScheduling(t *testing.T) { releases = append(releases, release) } + stats := q.Stats() + require.Equal(t, 2, len(stats)) + for _, s := range stats { + switch s.Tag() { + case "class1": + require.Equal(t, uint64(1), s.InProgress()) + require.Equal(t, uint64(reqCount/2-1), s.Pending()) + case "class2": + require.Equal(t, uint64(0), s.InProgress()) + require.Equal(t, uint64(reqCount/2), s.Pending()) + default: + require.Fail(t, "unknown tag:"+s.Tag()) + } + } + var result []string var wg sync.WaitGroup for i := 0; i < reqCount; i++ { @@ -52,6 +67,21 @@ func TestMClockSharesScheduling(t *testing.T) { } wg.Wait() + stats = q.Stats() + require.Equal(t, 2, len(stats)) + for _, s := range stats { + switch s.Tag() { + case "class1": + require.Equal(t, uint64(0), s.InProgress()) + require.Equal(t, 
uint64(0), s.Pending()) + case "class2": + require.Equal(t, uint64(0), s.InProgress()) + require.Equal(t, uint64(0), s.Pending()) + default: + require.Fail(t, "unknown tag:"+s.Tag()) + } + } + // Requests must be scheduled as class1->class1->class2->class1->class1->class2..., // because the ratio is 2 to 1. // However, there may be deviations due to rounding and sorting. @@ -116,7 +146,37 @@ func TestMClockRequestCancel(t *testing.T) { require.Equal(t, 0, q.limitQueue.Len()) require.Equal(t, 0, q.reservationQueue.Len()) + stats := q.Stats() + require.Equal(t, 2, len(stats)) + for _, s := range stats { + switch s.Tag() { + case "class1": + require.Equal(t, uint64(1), s.InProgress()) + require.Equal(t, uint64(0), s.Pending()) + case "class2": + require.Equal(t, uint64(0), s.InProgress()) + require.Equal(t, uint64(0), s.Pending()) + default: + require.Fail(t, "unknown tag:"+s.Tag()) + } + } + release1() + + stats = q.Stats() + require.Equal(t, 2, len(stats)) + for _, s := range stats { + switch s.Tag() { + case "class1": + require.Equal(t, uint64(0), s.InProgress()) + require.Equal(t, uint64(0), s.Pending()) + case "class2": + require.Equal(t, uint64(0), s.InProgress()) + require.Equal(t, uint64(0), s.Pending()) + default: + require.Fail(t, "unknown tag:"+s.Tag()) + } + } } func TestMClockLimitScheduling(t *testing.T) { @@ -159,6 +219,21 @@ func TestMClockLimitScheduling(t *testing.T) { } } + stats := q.Stats() + require.Equal(t, 2, len(stats)) + for _, s := range stats { + switch s.Tag() { + case "class1": + require.Equal(t, uint64(0), s.InProgress()) + require.Equal(t, uint64(reqCount/2), s.Pending()) + case "class2": + require.Equal(t, uint64(0), s.InProgress()) + require.Equal(t, uint64(reqCount/2), s.Pending()) + default: + require.Fail(t, "unknown tag:"+s.Tag()) + } + } + cl.v = math.MaxFloat64 var result []string @@ -202,6 +277,21 @@ func TestMClockLimitScheduling(t *testing.T) { require.Equal(t, 0, q.sharesQueue.Len()) require.Equal(t, 0, q.limitQueue.Len()) require.Equal(t, 0, q.reservationQueue.Len()) + + stats = q.Stats() + require.Equal(t, 2, len(stats)) + for _, s := range stats { + switch s.Tag() { + case "class1": + require.Equal(t, uint64(0), s.InProgress()) + require.Equal(t, uint64(0), s.Pending()) + case "class2": + require.Equal(t, uint64(0), s.InProgress()) + require.Equal(t, uint64(0), s.Pending()) + default: + require.Fail(t, "unknown tag:"+s.Tag()) + } + } } func TestMClockReservationScheduling(t *testing.T) { @@ -245,9 +335,39 @@ func TestMClockReservationScheduling(t *testing.T) { } } + stats := q.Stats() + require.Equal(t, 2, len(stats)) + for _, s := range stats { + switch s.Tag() { + case "class1": + require.Equal(t, uint64(0), s.InProgress()) + require.Equal(t, uint64(reqCount/2), s.Pending()) + case "class2": + require.Equal(t, uint64(0), s.InProgress()) + require.Equal(t, uint64(reqCount/2), s.Pending()) + default: + require.Fail(t, "unknown tag:"+s.Tag()) + } + } + cl.v = 1.00001 // 1s elapsed q.scheduleRequest() + stats = q.Stats() + require.Equal(t, 2, len(stats)) + for _, s := range stats { + switch s.Tag() { + case "class1": + require.Equal(t, uint64(0), s.InProgress()) + require.Equal(t, uint64(reqCount/2), s.Pending()) + case "class2": + require.Equal(t, uint64(100), s.InProgress()) + require.Equal(t, uint64(reqCount/2-100), s.Pending()) + default: + require.Fail(t, "unknown tag:"+s.Tag()) + } + } + var result []string for i, req := range requests { select { @@ -263,6 +383,21 @@ func TestMClockReservationScheduling(t *testing.T) { require.Equal(t, 
"class2", res) } + stats = q.Stats() + require.Equal(t, 2, len(stats)) + for _, s := range stats { + switch s.Tag() { + case "class1": + require.Equal(t, uint64(0), s.InProgress()) + require.Equal(t, uint64(reqCount/2), s.Pending()) + case "class2": + require.Equal(t, uint64(0), s.InProgress()) + require.Equal(t, uint64(reqCount/2-100), s.Pending()) + default: + require.Fail(t, "unknown tag:"+s.Tag()) + } + } + cl.v = math.MaxFloat64 q.scheduleRequest() @@ -270,6 +405,21 @@ func TestMClockReservationScheduling(t *testing.T) { require.Equal(t, 0, q.sharesQueue.Len()) require.Equal(t, 0, q.limitQueue.Len()) require.Equal(t, 0, q.reservationQueue.Len()) + + stats = q.Stats() + require.Equal(t, 2, len(stats)) + for _, s := range stats { + switch s.Tag() { + case "class1": + require.Equal(t, uint64(reqCount/2), s.InProgress()) + require.Equal(t, uint64(0), s.Pending()) + case "class2": + require.Equal(t, uint64(reqCount/2-100), s.InProgress()) + require.Equal(t, uint64(0), s.Pending()) + default: + require.Fail(t, "unknown tag:"+s.Tag()) + } + } } func TestMClockIdleTag(t *testing.T) { diff --git a/scheduling/stat.go b/scheduling/stat.go new file mode 100644 index 0000000..1775027 --- /dev/null +++ b/scheduling/stat.go @@ -0,0 +1,20 @@ +package scheduling + +import "sync/atomic" + +type Stat struct { + tag string + inProgress, pending atomic.Uint64 +} + +func (s *Stat) Tag() string { + return s.tag +} + +func (s *Stat) InProgress() uint64 { + return s.inProgress.Load() +} + +func (s *Stat) Pending() uint64 { + return s.pending.Load() +} From deaae4d37a061ad0d4a69aa86c3cded2b47ed7c6 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Tue, 4 Mar 2025 12:54:12 +0300 Subject: [PATCH 21/29] [#6] Makefile: Remove `fmt` rule, use `fumpt` rule instead There were several problems: - `fmt` was a duplicate of `fumpt` - `fmt` used globally installed `gofumpt` Signed-off-by: Aleksey Savchuk --- Makefile | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/Makefile b/Makefile index 731131a..4fe0061 100755 --- a/Makefile +++ b/Makefile @@ -24,12 +24,7 @@ GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION) # Run all code formatters -fmts: fmt imports - -# Reformat code -fmt: - @echo "⇒ Processing gofmt check" - @gofumpt -s -w . +fmts: fumpt imports # Reformat imports imports: From d752a1c95edd9e95958ef90024d1da3db5310df7 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Tue, 4 Mar 2025 12:58:57 +0300 Subject: [PATCH 22/29] [#6] pre-commit: Add hook for running `gofumpt` Signed-off-by: Aleksey Savchuk --- .pre-commit-config.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6d0e1e4..6bd9629 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -37,3 +37,9 @@ repos: pass_filenames: false types: [go] language: system + - id: gofumpt + name: gofumpt check + entry: make fumpt + pass_filenames: false + types: [go] + language: system From 3e7ca9403529ad0e9f4890f82df28ce721f1f23f Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 10 Mar 2025 16:58:38 +0300 Subject: [PATCH 23/29] Revert "[#7] mclock: Add tag stat" This reverts commit 25102d1e1aa3e9232af803ce3cd0088c37263de3. 
Signed-off-by: Dmitrii Stepanov --- scheduling/mclock.go | 46 +----------- scheduling/mclock_test.go | 150 -------------------------------------- scheduling/stat.go | 20 ----- 3 files changed, 2 insertions(+), 214 deletions(-) delete mode 100644 scheduling/stat.go diff --git a/scheduling/mclock.go b/scheduling/mclock.go index 213b3cf..82037d6 100644 --- a/scheduling/mclock.go +++ b/scheduling/mclock.go @@ -14,7 +14,6 @@ import ( const ( invalidIndex = -1 undefinedReservation float64 = -1.0 - minusOne = ^uint64(0) ) var ( @@ -61,8 +60,6 @@ type MClock struct { clock clock idleTimeout float64 tagInfo map[string]TagInfo - tagStat map[string]*Stat - stats []*Stat mtx sync.Mutex previous map[string]*request @@ -113,16 +110,6 @@ func NewMClock(runLimit, waitLimit uint64, tagInfo map[string]TagInfo, idleTimeo } result.previous = previous - result.tagStat = make(map[string]*Stat, len(tagInfo)) - result.stats = make([]*Stat, 0, len(tagInfo)) - for tag := range tagInfo { - s := &Stat{ - tag: tag, - } - result.tagStat[tag] = s - result.stats = append(result.stats, s) - } - return result, nil } @@ -163,12 +150,6 @@ func (q *MClock) Close() { } } -// Stats returns per tag stat. -// Returned slice should not be modified. -func (q *MClock) Stats() []*Stat { - return q.stats -} - func validateParams(runLimit uint64, tagInfo map[string]TagInfo) error { if runLimit == 0 { return ErrInvalidRunLimit @@ -191,16 +172,11 @@ func (q *MClock) dropRequest(req *request) { q.mtx.Lock() defer q.mtx.Unlock() - s, ok := q.tagStat[req.tag] - assert.Cond(ok, "undefined stat tag:", req.tag) - select { case <-req.scheduled: assert.Cond(q.inProgress > 0, "invalid requests count") q.inProgress-- - s.inProgress.Add(minusOne) default: - s.pending.Add(minusOne) } q.removeFromQueues(req) @@ -258,14 +234,9 @@ func (q *MClock) pushRequest(tag string) (*request, ReleaseFunc, error) { } heap.Push(q.sharesQueue, &sharesMQueueItem{r: r}) heap.Push(q.limitQueue, &limitMQueueItem{r: r}) - - s, ok := q.tagStat[tag] - assert.Cond(ok, "undefined stat tag:", tag) - s.pending.Add(1) - q.scheduleRequestUnsafe() - return r, func() { q.requestCompleted(tag) }, nil + return r, q.requestCompleted, nil } func (q *MClock) adjustTags(now float64, idleTag string) { @@ -347,10 +318,6 @@ func (q *MClock) scheduleByLimitAndWeight(now float64) { } q.removeFromQueues(next.r) - s, ok := q.tagStat[next.r.tag] - assert.Cond(ok, "undefined stat tag:", next.r.tag) - s.pending.Add(minusOne) - tagInfo, ok := q.tagInfo[next.r.tag] assert.Cond(ok, "unknown tag:", next.r.tag) if tagInfo.ReservedIOPS != nil && hadReservation { @@ -375,7 +342,6 @@ func (q *MClock) scheduleByLimitAndWeight(now float64) { assertIndexInvalid(next.r) q.inProgress++ - s.inProgress.Add(1) close(next.r.scheduled) } } @@ -385,10 +351,6 @@ func (q *MClock) scheduleByReservation(now float64) { next := heap.Pop(q.reservationQueue).(*reservationMQueueItem) q.removeFromQueues(next.r) - s, ok := q.tagStat[next.r.tag] - assert.Cond(ok, "undefined stat tag:", next.r.tag) - s.pending.Add(minusOne) - select { case <-next.r.canceled: continue @@ -397,7 +359,6 @@ func (q *MClock) scheduleByReservation(now float64) { assertIndexInvalid(next.r) q.inProgress++ - s.inProgress.Add(1) close(next.r.scheduled) } } @@ -417,7 +378,7 @@ func (q *MClock) removeFromQueues(r *request) { } } -func (q *MClock) requestCompleted(tag string) { +func (q *MClock) requestCompleted() { q.mtx.Lock() defer q.mtx.Unlock() @@ -427,9 +388,6 @@ func (q *MClock) requestCompleted(tag string) { assert.Cond(q.inProgress > 0, "invalid 
requests count") q.inProgress-- - s, ok := q.tagStat[tag] - assert.Cond(ok, "undefined stat tag:", tag) - s.inProgress.Add(minusOne) q.scheduleRequestUnsafe() } diff --git a/scheduling/mclock_test.go b/scheduling/mclock_test.go index 90b3b5a..3aa261f 100644 --- a/scheduling/mclock_test.go +++ b/scheduling/mclock_test.go @@ -39,21 +39,6 @@ func TestMClockSharesScheduling(t *testing.T) { releases = append(releases, release) } - stats := q.Stats() - require.Equal(t, 2, len(stats)) - for _, s := range stats { - switch s.Tag() { - case "class1": - require.Equal(t, uint64(1), s.InProgress()) - require.Equal(t, uint64(reqCount/2-1), s.Pending()) - case "class2": - require.Equal(t, uint64(0), s.InProgress()) - require.Equal(t, uint64(reqCount/2), s.Pending()) - default: - require.Fail(t, "unknown tag:"+s.Tag()) - } - } - var result []string var wg sync.WaitGroup for i := 0; i < reqCount; i++ { @@ -67,21 +52,6 @@ func TestMClockSharesScheduling(t *testing.T) { } wg.Wait() - stats = q.Stats() - require.Equal(t, 2, len(stats)) - for _, s := range stats { - switch s.Tag() { - case "class1": - require.Equal(t, uint64(0), s.InProgress()) - require.Equal(t, uint64(0), s.Pending()) - case "class2": - require.Equal(t, uint64(0), s.InProgress()) - require.Equal(t, uint64(0), s.Pending()) - default: - require.Fail(t, "unknown tag:"+s.Tag()) - } - } - // Requests must be scheduled as class1->class1->class2->class1->class1->class2..., // because the ratio is 2 to 1. // However, there may be deviations due to rounding and sorting. @@ -146,37 +116,7 @@ func TestMClockRequestCancel(t *testing.T) { require.Equal(t, 0, q.limitQueue.Len()) require.Equal(t, 0, q.reservationQueue.Len()) - stats := q.Stats() - require.Equal(t, 2, len(stats)) - for _, s := range stats { - switch s.Tag() { - case "class1": - require.Equal(t, uint64(1), s.InProgress()) - require.Equal(t, uint64(0), s.Pending()) - case "class2": - require.Equal(t, uint64(0), s.InProgress()) - require.Equal(t, uint64(0), s.Pending()) - default: - require.Fail(t, "unknown tag:"+s.Tag()) - } - } - release1() - - stats = q.Stats() - require.Equal(t, 2, len(stats)) - for _, s := range stats { - switch s.Tag() { - case "class1": - require.Equal(t, uint64(0), s.InProgress()) - require.Equal(t, uint64(0), s.Pending()) - case "class2": - require.Equal(t, uint64(0), s.InProgress()) - require.Equal(t, uint64(0), s.Pending()) - default: - require.Fail(t, "unknown tag:"+s.Tag()) - } - } } func TestMClockLimitScheduling(t *testing.T) { @@ -219,21 +159,6 @@ func TestMClockLimitScheduling(t *testing.T) { } } - stats := q.Stats() - require.Equal(t, 2, len(stats)) - for _, s := range stats { - switch s.Tag() { - case "class1": - require.Equal(t, uint64(0), s.InProgress()) - require.Equal(t, uint64(reqCount/2), s.Pending()) - case "class2": - require.Equal(t, uint64(0), s.InProgress()) - require.Equal(t, uint64(reqCount/2), s.Pending()) - default: - require.Fail(t, "unknown tag:"+s.Tag()) - } - } - cl.v = math.MaxFloat64 var result []string @@ -277,21 +202,6 @@ func TestMClockLimitScheduling(t *testing.T) { require.Equal(t, 0, q.sharesQueue.Len()) require.Equal(t, 0, q.limitQueue.Len()) require.Equal(t, 0, q.reservationQueue.Len()) - - stats = q.Stats() - require.Equal(t, 2, len(stats)) - for _, s := range stats { - switch s.Tag() { - case "class1": - require.Equal(t, uint64(0), s.InProgress()) - require.Equal(t, uint64(0), s.Pending()) - case "class2": - require.Equal(t, uint64(0), s.InProgress()) - require.Equal(t, uint64(0), s.Pending()) - default: - require.Fail(t, 
"unknown tag:"+s.Tag()) - } - } } func TestMClockReservationScheduling(t *testing.T) { @@ -335,39 +245,9 @@ func TestMClockReservationScheduling(t *testing.T) { } } - stats := q.Stats() - require.Equal(t, 2, len(stats)) - for _, s := range stats { - switch s.Tag() { - case "class1": - require.Equal(t, uint64(0), s.InProgress()) - require.Equal(t, uint64(reqCount/2), s.Pending()) - case "class2": - require.Equal(t, uint64(0), s.InProgress()) - require.Equal(t, uint64(reqCount/2), s.Pending()) - default: - require.Fail(t, "unknown tag:"+s.Tag()) - } - } - cl.v = 1.00001 // 1s elapsed q.scheduleRequest() - stats = q.Stats() - require.Equal(t, 2, len(stats)) - for _, s := range stats { - switch s.Tag() { - case "class1": - require.Equal(t, uint64(0), s.InProgress()) - require.Equal(t, uint64(reqCount/2), s.Pending()) - case "class2": - require.Equal(t, uint64(100), s.InProgress()) - require.Equal(t, uint64(reqCount/2-100), s.Pending()) - default: - require.Fail(t, "unknown tag:"+s.Tag()) - } - } - var result []string for i, req := range requests { select { @@ -383,21 +263,6 @@ func TestMClockReservationScheduling(t *testing.T) { require.Equal(t, "class2", res) } - stats = q.Stats() - require.Equal(t, 2, len(stats)) - for _, s := range stats { - switch s.Tag() { - case "class1": - require.Equal(t, uint64(0), s.InProgress()) - require.Equal(t, uint64(reqCount/2), s.Pending()) - case "class2": - require.Equal(t, uint64(0), s.InProgress()) - require.Equal(t, uint64(reqCount/2-100), s.Pending()) - default: - require.Fail(t, "unknown tag:"+s.Tag()) - } - } - cl.v = math.MaxFloat64 q.scheduleRequest() @@ -405,21 +270,6 @@ func TestMClockReservationScheduling(t *testing.T) { require.Equal(t, 0, q.sharesQueue.Len()) require.Equal(t, 0, q.limitQueue.Len()) require.Equal(t, 0, q.reservationQueue.Len()) - - stats = q.Stats() - require.Equal(t, 2, len(stats)) - for _, s := range stats { - switch s.Tag() { - case "class1": - require.Equal(t, uint64(reqCount/2), s.InProgress()) - require.Equal(t, uint64(0), s.Pending()) - case "class2": - require.Equal(t, uint64(reqCount/2-100), s.InProgress()) - require.Equal(t, uint64(0), s.Pending()) - default: - require.Fail(t, "unknown tag:"+s.Tag()) - } - } } func TestMClockIdleTag(t *testing.T) { diff --git a/scheduling/stat.go b/scheduling/stat.go deleted file mode 100644 index 1775027..0000000 --- a/scheduling/stat.go +++ /dev/null @@ -1,20 +0,0 @@ -package scheduling - -import "sync/atomic" - -type Stat struct { - tag string - inProgress, pending atomic.Uint64 -} - -func (s *Stat) Tag() string { - return s.tag -} - -func (s *Stat) InProgress() uint64 { - return s.inProgress.Load() -} - -func (s *Stat) Pending() uint64 { - return s.pending.Load() -} From 346752477b8419df9811415e3bc935bc83b5d053 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 19 Mar 2025 15:51:38 +0300 Subject: [PATCH 24/29] [#12] mclock: Fix timer-based scheduling Let's assume that there are two requests in the queue with execution time t1 and t2. The timer is set to t1. The timer is triggered, schedules the t1 request, calculates the time for the next timer t2 to be triggered. But it doesn't schedules timer to this time because of the `q.timeBasedScheduleTs > nextTs` check. 
Signed-off-by: Dmitrii Stepanov --- scheduling/clock.go | 29 ++++++++++++++++++++--------- scheduling/mclock.go | 38 ++++++++++++++++++-------------------- scheduling/mclock_test.go | 22 ++++++++++++++++++++++ 3 files changed, 60 insertions(+), 29 deletions(-) diff --git a/scheduling/clock.go b/scheduling/clock.go index 9fe66bd..6fa3d84 100644 --- a/scheduling/clock.go +++ b/scheduling/clock.go @@ -1,6 +1,7 @@ package scheduling import ( + "math" "sync" "time" ) @@ -36,10 +37,7 @@ func (c *systemClock) now() float64 { } func (c *systemClock) runAt(ts float64, f func()) { - select { - case c.schedule <- scheduleInfo{ts: ts, f: f}: - default: // timer fired, scheduleRequest will call runAt again - } + c.schedule <- scheduleInfo{ts: ts, f: f} } func (c *systemClock) close() { @@ -53,18 +51,30 @@ func (c *systemClock) start() { defer c.wg.Done() t := time.NewTimer(0) <-t.C - var f func() + currentTs := math.MaxFloat64 + var currentTask func() for { select { case <-t.C: - if f != nil { - f() - f = nil + if currentTask != nil { + c.wg.Add(1) + f := currentTask + go func() { + defer c.wg.Done() + f() + }() + currentTask = nil } + currentTs = math.MaxFloat64 case s, ok := <-c.schedule: if !ok { return } + if s.ts >= currentTs { + // current timer will fire earlier + // so next scheduleRequest will push new schedule event + continue + } var d time.Duration now := c.now() if now < s.ts { @@ -77,7 +87,8 @@ func (c *systemClock) start() { } } t.Reset(d) - f = s.f + currentTask = s.f + currentTs = s.ts } } }() diff --git a/scheduling/mclock.go b/scheduling/mclock.go index 82037d6..6d13d5d 100644 --- a/scheduling/mclock.go +++ b/scheduling/mclock.go @@ -61,15 +61,14 @@ type MClock struct { idleTimeout float64 tagInfo map[string]TagInfo - mtx sync.Mutex - previous map[string]*request - inProgress uint64 - timeBasedScheduleTs float64 - reservationQueue *queue - limitQueue *queue - sharesQueue *queue - readyQueue *queue - closed bool + mtx sync.Mutex + previous map[string]*request + inProgress uint64 + reservationQueue *queue + limitQueue *queue + sharesQueue *queue + readyQueue *queue + closed bool } // NewMClock creates new MClock scheduler instance with @@ -92,11 +91,10 @@ func NewMClock(runLimit, waitLimit uint64, tagInfo map[string]TagInfo, idleTimeo idleTimeout: idleTimeout.Seconds(), tagInfo: tagInfo, - reservationQueue: &queue{}, - limitQueue: &queue{}, - sharesQueue: &queue{}, - readyQueue: &queue{}, - timeBasedScheduleTs: math.MaxFloat64, + reservationQueue: &queue{}, + limitQueue: &queue{}, + sharesQueue: &queue{}, + readyQueue: &queue{}, } previous := make(map[string]*request) @@ -284,23 +282,23 @@ func (q *MClock) scheduleRequestUnsafe() { func (q *MClock) setNextScheduleTimer(now float64) { nextTs := math.MaxFloat64 + var hasNext bool if q.reservationQueue.Len() > 0 { nextTs = q.reservationQueue.items[0].ts() + hasNext = true } if q.limitQueue.Len() > 0 && q.limitQueue.items[0].ts() < nextTs { nextTs = q.limitQueue.items[0].ts() + hasNext = true } if nextTs <= now { // should not happen as we always compare .ts() <= now return } - - if q.timeBasedScheduleTs > nextTs { - q.clock.runAt(nextTs, func() { - q.scheduleRequest() - }) - q.timeBasedScheduleTs = nextTs + if !hasNext { + return } + q.clock.runAt(nextTs, q.scheduleRequest) } func (q *MClock) scheduleByLimitAndWeight(now float64) { diff --git a/scheduling/mclock_test.go b/scheduling/mclock_test.go index 3aa261f..f9da670 100644 --- a/scheduling/mclock_test.go +++ b/scheduling/mclock_test.go @@ -493,3 +493,25 @@ func 
TestMClockTimeBasedSchedule(t *testing.T) { close(checked) require.NoError(t, eg.Wait()) } + +func TestMClockLowLimit(t *testing.T) { + t.Parallel() + limit := 2.0 + q, err := NewMClock(100, 100, map[string]TagInfo{ + "class1": {Share: 50, LimitIOPS: &limit}, + }, 5*time.Second) + require.NoError(t, err) + defer q.Close() + + eg, ctx := errgroup.WithContext(context.Background()) + eg.SetLimit(5) + eg.Go(func() error { + for range 3 { + release, err := q.RequestArrival(ctx, "class1") + require.NoError(t, err) + release() + } + return nil + }) + require.NoError(t, eg.Wait()) +} From 1ca213ee7cb6504561a314220312d4f0c2abc446 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Thu, 20 Mar 2025 10:31:15 +0300 Subject: [PATCH 25/29] [#12] mclock: Fix deadlock caused by mclock.Close Deadlock scenario: - mclock is closed by the `Close` method, which locks the mutex and calls `clock.close` - the clock starts a `scheduleRequest` goroutine, which tries to lock the mutex - `clock.close` waits for all goroutines to finish Signed-off-by: Dmitrii Stepanov --- scheduling/mclock.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/scheduling/mclock.go b/scheduling/mclock.go index 6d13d5d..63a969c 100644 --- a/scheduling/mclock.go +++ b/scheduling/mclock.go @@ -137,15 +137,15 @@ func (q *MClock) RequestArrival(ctx context.Context, tag string) (ReleaseFunc, e // No new requests for scheduling will be accepted after the closing. func (q *MClock) Close() { q.mtx.Lock() - defer q.mtx.Unlock() - q.closed = true - q.clock.close() for q.limitQueue.Len() > 0 { item := heap.Pop(q.limitQueue).(*limitMQueueItem) close(item.r.canceled) q.removeFromQueues(item.r) } + q.mtx.Unlock() + + q.clock.close() } func validateParams(runLimit uint64, tagInfo map[string]TagInfo) error { @@ -261,6 +261,10 @@ func (q *MClock) scheduleRequest() { q.mtx.Lock() defer q.mtx.Unlock() + if q.closed { + return + } + q.scheduleRequestUnsafe() } From 32079ad7c2752b2ee1ed89c927e11418b641655a Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Thu, 20 Mar 2025 17:24:39 +0300 Subject: [PATCH 26/29] [#12] grpc: Fix method name Signed-off-by: Dmitrii Stepanov --- tagging/grpc.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tagging/grpc.go b/tagging/grpc.go index 5e255dd..4e2fcfe 100644 --- a/tagging/grpc.go +++ b/tagging/grpc.go @@ -11,8 +11,8 @@ const ( ioTagHeader = "x-frostfs-io-tag" ) -// NewUnaryClientInteceptor creates new gRPC unary interceptor to set an IO tag to gRPC metadata. -func NewUnaryClientInteceptor() grpc.UnaryClientInterceptor { +// NewUnaryClientInterceptor creates new gRPC unary interceptor to set an IO tag to gRPC metadata. +func NewUnaryClientInterceptor() grpc.UnaryClientInterceptor { return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { return invoker(setIOTagToGRPCMetadata(ctx), method, req, reply, cc, opts...) } From 57d895c32167472184eb755df7b1d3114b6a63c7 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 24 Mar 2025 15:33:50 +0300 Subject: [PATCH 27/29] [#13] mclock: Schedule requests as soon as possible Let's assume that for some tag `limit = 1000 RPS` is defined and each request takes 10 ms to complete. At some point in time 1000 requests were accepted. Then the first request will be scheduled at `now()`, the second at `now() + 1 ms`, the third at `now() + 2 ms`, etc. The total processing duration of 1000 requests will be 1 second + 10 ms. After this fix the scheduler looks ahead to schedule requests within the limit.
From 32079ad7c2752b2ee1ed89c927e11418b641655a Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 20 Mar 2025 17:24:39 +0300
Subject: [PATCH 26/29] [#12] grpc: Fix method name

Signed-off-by: Dmitrii Stepanov
---
 tagging/grpc.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tagging/grpc.go b/tagging/grpc.go
index 5e255dd..4e2fcfe 100644
--- a/tagging/grpc.go
+++ b/tagging/grpc.go
@@ -11,8 +11,8 @@ const (
 	ioTagHeader = "x-frostfs-io-tag"
 )
 
-// NewUnaryClientInteceptor creates new gRPC unary interceptor to set an IO tag to gRPC metadata.
-func NewUnaryClientInteceptor() grpc.UnaryClientInterceptor {
+// NewUnaryClientInterceptor creates new gRPC unary interceptor to set an IO tag to gRPC metadata.
+func NewUnaryClientInterceptor() grpc.UnaryClientInterceptor {
 	return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
 		return invoker(setIOTagToGRPCMetadata(ctx), method, req, reply, cc, opts...)
 	}

From 57d895c32167472184eb755df7b1d3114b6a63c7 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 24 Mar 2025 15:33:50 +0300
Subject: [PATCH 27/29] [#13] mclock: Schedule requests as soon as possible

Let's assume that some tag has `limit = 1000 RPS` defined and each
request takes 10 ms to complete. At some point in time 1000 requests
are accepted. Then the first request will be scheduled at `now()`, the
second at `now() + 1 ms`, the third at `now() + 2 ms`, and so on, so
the total processing duration of 1000 requests will be 1 second + 10 ms.

After this fix the scheduler looks ahead and schedules requests within
the limit: for the situation above, the total processing duration of
1000 requests will be 10 ms in an ideal world. The same applies to
reservation scheduling.

Signed-off-by: Dmitrii Stepanov
---
 scheduling/mclock.go      |  4 +--
 scheduling/mclock_test.go | 56 ++++++++++++++++++++++++++++++++++++---
 2 files changed, 54 insertions(+), 6 deletions(-)

diff --git a/scheduling/mclock.go b/scheduling/mclock.go
index 63a969c..f9bf2d2 100644
--- a/scheduling/mclock.go
+++ b/scheduling/mclock.go
@@ -306,7 +306,7 @@ func (q *MClock) setNextScheduleTimer(now float64) {
 }
 
 func (q *MClock) scheduleByLimitAndWeight(now float64) {
-	for q.limitQueue.Len() > 0 && q.limitQueue.items[0].ts() <= now {
+	for q.limitQueue.Len() > 0 && q.limitQueue.items[0].ts() < now+1.0 {
 		ready := heap.Pop(q.limitQueue).(*limitMQueueItem)
 		heap.Push(q.readyQueue, &readyMQueueItem{r: ready.r})
 	}
@@ -349,7 +349,7 @@ func (q *MClock) scheduleByLimitAndWeight(now float64) {
 }
 
 func (q *MClock) scheduleByReservation(now float64) {
-	for q.inProgress < q.runLimit && q.reservationQueue.Len() > 0 && q.reservationQueue.items[0].ts() <= now {
+	for q.inProgress < q.runLimit && q.reservationQueue.Len() > 0 && q.reservationQueue.items[0].ts() < now+1.0 {
 		next := heap.Pop(q.reservationQueue).(*reservationMQueueItem)
 		q.removeFromQueues(next.r)
 
diff --git a/scheduling/mclock_test.go b/scheduling/mclock_test.go
index f9da670..81e1811 100644
--- a/scheduling/mclock_test.go
+++ b/scheduling/mclock_test.go
@@ -210,7 +210,7 @@ func TestMClockReservationScheduling(t *testing.T) {
 	reqCount = (reqCount / 2) * 2
 	limit := 0.01 // 1 request in 100 seconds
 	resevation := 100.0 // 100 RPS
-	cl := &noopClock{}
+	cl := &noopClock{v: float64(1.0)}
 	q, err := NewMClock(uint64(reqCount), math.MaxUint64, map[string]TagInfo{
 		"class1": {Share: 2, LimitIOPS: &limit},
 		"class2": {Share: 1, LimitIOPS: &limit, ReservedIOPS: &resevation},
@@ -237,15 +237,18 @@ func TestMClockReservationScheduling(t *testing.T) {
 
 	q.scheduleRequest()
 
+	count := 0
 	for _, req := range requests {
 		select {
 		case <-req.scheduled:
-			require.Fail(t, "no request must be scheduled because of time is 0.0 but limit values are greater than 0.0")
+			require.Equal(t, "class2", req.tag)
+			count++
 		default:
 		}
 	}
+	require.Equal(t, 100, count, "class2 has 100 requests reserved, so only 100 requests must be scheduled")
 
-	cl.v = 1.00001 // 1s elapsed
+	cl.v = 1.9999 // almost 1s elapsed (0.9999, not 1.0, to take float64 accuracy into account)
 	q.scheduleRequest()
 
 	var result []string
@@ -258,7 +261,7 @@ func TestMClockReservationScheduling(t *testing.T) {
 		}
 	}
 
-	require.Equal(t, 100, len(result))
+	require.Equal(t, 200, len(result))
 	for _, res := range result {
 		require.Equal(t, "class2", res)
 	}
@@ -515,3 +518,48 @@ func TestMClockLowLimit(t *testing.T) {
 	})
 	require.NoError(t, eg.Wait())
 }
+
+func TestMClockLimitTotalTime(t *testing.T) {
+	t.Parallel()
+	limit := 10.0 // 10 RPS -> 1 request per 100 ms
+	q, err := NewMClock(100, 100, map[string]TagInfo{
+		"class1": {Share: 50, LimitIOPS: &limit},
+	}, 5*time.Second)
+	require.NoError(t, err)
+	defer q.Close()
+
+	// 10 requests, each request runs for 500 ms,
+	// but they should be scheduled as soon as possible,
+	// so the total duration must be less than 1 second
+	eg, ctx := errgroup.WithContext(context.Background())
+	startedAt := time.Now()
+	for range 10 {
+		eg.Go(func() error {
+			release, err := q.RequestArrival(ctx, "class1")
+			require.NoError(t, err)
+			time.Sleep(500 * time.Millisecond)
+			release()
+			return nil
+		})
+	}
+	require.NoError(t, eg.Wait())
+	require.True(t, time.Since(startedAt) <= 1*time.Second)
+
+	// 11 requests, limit = 10 RPS, so 10 requests should be
+	// scheduled as soon as possible, but the last request should be
+	// scheduled at now + 1.0 s
+	eg, ctx = errgroup.WithContext(context.Background())
+	startedAt = time.Now()
+	for range 11 {
+		eg.Go(func() error {
+			release, err := q.RequestArrival(ctx, "class1")
+			require.NoError(t, err)
+			time.Sleep(500 * time.Millisecond)
+			release()
+			return nil
+		})
+	}
+	require.NoError(t, eg.Wait())
+	require.True(t, time.Since(startedAt) >= 1500*time.Millisecond)
+	require.True(t, time.Since(startedAt) <= 1600*time.Millisecond) // 100 ms offset to complete all requests
+}
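A quick way to check the commit message's arithmetic is to enumerate the timestamps the limit queue would hold and count how many each predicate releases. The following is a standalone back-of-the-envelope calculation, not library code; the constants come straight from the example above:

```go
package main

import "fmt"

func main() {
	const limitRPS = 1000.0
	now := 0.0

	// timestamps assigned to 1000 queued requests, spaced 1/limit apart
	ts := make([]float64, 1000)
	for i := range ts {
		ts[i] = now + float64(i)/limitRPS
	}

	oldReady, newReady := 0, 0
	for _, t := range ts {
		if t <= now { // old predicate: one request per timer tick
			oldReady++
		}
		if t < now+1.0 { // new predicate: the whole second's budget at once
			newReady++
		}
	}
	fmt.Println("ready under old predicate:", oldReady) // 1
	fmt.Println("ready under new predicate:", newReady) // 1000
}
```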
From 6c6e5bf4de10503f7e86e7916229f307b12ce184 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 28 Mar 2025 13:39:29 +0300
Subject: [PATCH 28/29] [#14] mclock: Allow to prohibit tag requests

It is now possible to restrict requests for a specific tag. A separate
field in `TagInfo` is used to avoid comparing float64 values with zero.

Signed-off-by: Dmitrii Stepanov
---
 scheduling/mclock.go      |  5 +++++
 scheduling/mclock_test.go | 19 +++++++++++++++++++
 2 files changed, 24 insertions(+)

diff --git a/scheduling/mclock.go b/scheduling/mclock.go
index f9bf2d2..64c62a8 100644
--- a/scheduling/mclock.go
+++ b/scheduling/mclock.go
@@ -22,6 +22,7 @@ var (
 	ErrMClockSchedulerUnknownTag = errors.New("unknown tag")
 	ErrInvalidTagInfo            = errors.New("invalid tag info: shares, limit and reservation must be greater than zero")
 	ErrInvalidRunLimit           = errors.New("invalid run limit: must be greater than zero")
+	ErrTagRequestsProhibited     = errors.New("tag requests are prohibited")
 )
 
 type request struct {
@@ -49,6 +50,7 @@ type TagInfo struct {
 	ReservedIOPS *float64
 	LimitIOPS    *float64
 	Share        float64
+	Prohibited   bool
 }
 
 // MClock is mClock scheduling algorithm implementation.
@@ -196,6 +198,9 @@ func (q *MClock) pushRequest(tag string) (*request, ReleaseFunc, error) {
 	if !ok {
 		return nil, nil, ErrMClockSchedulerUnknownTag
 	}
+	if tagInfo.Prohibited {
+		return nil, nil, ErrTagRequestsProhibited
+	}
 	prev, ok := q.previous[tag]
 	assert.Cond(ok, "undefined previous:", tag)
 
diff --git a/scheduling/mclock_test.go b/scheduling/mclock_test.go
index 81e1811..6433990 100644
--- a/scheduling/mclock_test.go
+++ b/scheduling/mclock_test.go
@@ -563,3 +563,22 @@ func TestMClockLimitTotalTime(t *testing.T) {
 	require.True(t, time.Since(startedAt) >= 1500*time.Millisecond)
 	require.True(t, time.Since(startedAt) <= 1600*time.Millisecond) // 100 ms offset to complete all requests
 }
+
+func TestMClockRestrictTagRequests(t *testing.T) {
+	t.Parallel()
+	limit := 10.0
+	q, err := NewMClock(100, 100, map[string]TagInfo{
+		"class1": {Share: 50, LimitIOPS: &limit},
+		"class2": {Share: 50, LimitIOPS: &limit, Prohibited: true},
+	}, 5*time.Second)
+	require.NoError(t, err)
+	defer q.Close()
+
+	release, err := q.RequestArrival(context.Background(), "class1")
+	require.NoError(t, err)
+	release()
+
+	release, err = q.RequestArrival(context.Background(), "class2")
+	require.ErrorIs(t, err, ErrTagRequestsProhibited)
+	require.Nil(t, release)
+}
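For callers, the new flag turns a tag off without removing it from the configuration. A usage sketch follows; the import path `git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling` is assumed here and should be verified against the actual go.mod:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	// assumed module path for this repo's scheduling package
	"git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling"
)

func main() {
	limit := 10.0
	q, err := scheduling.NewMClock(100, 100, map[string]scheduling.TagInfo{
		"critical":   {Share: 50, LimitIOPS: &limit},
		"background": {Share: 50, LimitIOPS: &limit, Prohibited: true}, // shut off, config kept
	}, 5*time.Second)
	if err != nil {
		panic(err)
	}
	defer q.Close()

	// A prohibited tag fails fast with ErrTagRequestsProhibited
	// instead of queueing the request.
	if _, err := q.RequestArrival(context.Background(), "background"); errors.Is(err, scheduling.ErrTagRequestsProhibited) {
		fmt.Println("background IO is currently prohibited")
	}
}
```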
From b5ed0b6eff475ecaa61e1a33b5346f449806ee37 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 28 Mar 2025 13:43:11 +0300
Subject: [PATCH 29/29] [#14] CODEOWNERS: Use core committers and developers groups

Signed-off-by: Dmitrii Stepanov
---
 CODEOWNERS | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CODEOWNERS b/CODEOWNERS
index b6fa647..d19c96a 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -1,3 +1,3 @@
-.* @fyrchik
+.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers
 .forgejo/.* @potyarkin
 Makefile @potyarkin