forked from TrueCloudLab/frostfs-testlib
Compare commits
238 commits
Commit list (238 commits; only the SHA1 column survived extraction, the author, message, and date columns are empty):

24e1dfef28, 0c9660fffc, 8eaa511e5c, a1953684b8, 451de5e07e, f24bfc06fd, 47bc11835b, 2a90ec74ff,
95b32a036a, 55d8ee5da0, ea40940514, 6f1baf3cf6, 26139767f4, 3d6a356e20, e6faddedeb, b2bf6677f1,
3f3be83d90, 5fa58a55c0, 738cfacbb7, cf48f474eb, 2a41f2b0f6, a04eba8aec, 2976e30b75, 24b8ca73d7,
cef64e315e, 0d750ed114, 1bee69042b, 4a2ac8a9b6, 36bfe385d5, 565fd4c72b, 84e83487f9, d2f8323fb9,
eba782e7d2, 85c2707ec8, 0caca54e36, 8ae1b99db9, 6926c09dbe, 1c2ed25929, 0ba4a73db3, 8a8b35846e,
5bdacdf5ba, ae9e8d8c30, 54b42e2d8d, ea60c2104a, 8306a9f3ff, 6b036a09b7, a983e0566e, 7a500330de,
166e44da9c, 4c0d76408c, 40dfd015a8, f472d7e1ce, b6a657e76c, 6f99aef406, 996f92ffa7, 429698944e,
376499a7e8, f4460194bc, 3a4204f2e4, c9e4c2c7bb, da16f3c3a5, f1b2fbd47b, cb31d41f15, 7a482152a8,
bfd7f70b6c, 10821f4c49, 5d192524a0, a3b78559a9, ec42b156ac, ea1b348120, e7423938e9, a563f089f6,
37a1177a3c, b8ce75b299, 3fee7aa197, 3e64b52306, 0306c09bed, a32bd120f2, 5b715877b3, c0e37c8138,
80c65b454e, 541a3e0636, 70f0357960, a85070e957, 82a8f9bab3, 65ec50391e, 863e74f161, 6629b9bbaa,
e2a170d66e, 338584069d, 9cfaf1a618, 076e444f84, 653621fb7e, 2dc5aa8a1e, 11487e983d, 9c508c4f66,
f2bded64e4, 0e247c2ff2, b323bcfd0a, 25925c637b, 09a7f66d1e, 22b41b227f, f5a7ff5c90, 3fc3eaadf3,
273f0d13a5, 55cebc042c, 751381cd60, 4f3814690e, d79fd87ede, 8ba2cb8030, 6caa77dedf, 0d7a15877c,
82f9df088a, e04fac0770, 328e43fe67, c0a25ab699, 40fa2c24cc, be36a10f1e, df8d99d83c, d6a2cf92a2,
a3bda0b34f, a4d1082ed5, 73c362c307, 10a6efa333, 663c144709, 8e739adea5, 3d63772f4a, 02f3ef6b40,
89522b607c, be964e731f, f1264bd473, 54d26b226c, 247d2fbab7, ae566b413b, 81dfc723da, e65fc359fe,
17c1a4f14b, dc6b0e407f, 39a17f3634, 47414eb866, c17f0f6173, d1ba7eb661, f072f88673, 253bb3b1d8,
9ab4def44f, ed8f90dfc0, ed70dada96, 22647c6d59, 61a1b28652, 6519cfafc9, 72bd467c53, f8562da7e0,
c8227e80af, 1f50166e78, 03c45d7592, e970fe2788, 8ee2985c89, 137fd21561, f3c160f313, 3af4dfd977,
8a360683ae, f4111a1374, b1a3d740e9, 0c3bb20af5, e1f3444e92, cff5db5a67, 1c3bbe26f7, dd347dd8fb,
98f9c78f09, 2c2af7f8ed, d039bcc221, e919064bb9, 98ccd4c382, 9feb8135e3, 64f004d5a5, eb37573df8,
602de43bff, fc1f373477, b039ee9940, be9b3f5855, f7ef8cb881, ecf8f0841a, 19b8b96898, f2d34dbf2e,
e14896400f, 449c18bb1a, aa277fdd6a, 7059596506, 7112bf9c88, b1c21e0e5b, 02c079eda3, d28f3cdc28,
e4878f4d1e, 807235af95, 716a780a13, d6e08c477b, 612e088763, b856e82008, 2240be09d2, 38742badf2,
9c792c091e, 49ccd47e81, 675183cd9a, 8dcfae5cb2, 15862e5901, 4896abcec3, 62216293f8, 59b4157991,
917dc6f6d8, 14c85e0a9e, 3050ccc9fa, f8409fa9f9, a14b082a4d, 05ac392485, ac28df2652, 889e108be9,
182bd6ab36, 13ea25bff5, c0f63e3783, 98f5075715, f2f3d3c8e3, 26a78c0eae, e9777b63cd, e3c0f76896,
987e7f2a30, 584ba5f0d1, 2bad0f1db6, a26f8e9c80, cc35b2e6da, 123b5425a8, 32a8c5274a, 6cdeb497c0,
a34c34991f, 10468fa545, d9504697ba, d30d3d5cfd, 7399cc9a8e, 70416d40c2
149 changed files with 11671 additions and 3513 deletions
.devenv.hosting.yaml (new file, +109; indentation reconstructed, the flat scrape lost it)

@@ -0,0 +1,109 @@
+hosts:
+  - address: localhost
+    hostname: localhost
+    attributes:
+      sudo_shell: false
+    plugin_name: docker
+    healthcheck_plugin_name: basic
+    attributes:
+      skip_readiness_check: True
+      force_transactions: True
+    services:
+      - name: frostfs-storage_01
+        attributes:
+          container_name: s01
+          config_path: /etc/frostfs/storage/config.yml
+          wallet_path: ../frostfs-dev-env/services/storage/wallet01.json
+          local_wallet_config_path: ./TemporaryDir/empty-password.yml
+          local_wallet_path: ../frostfs-dev-env/services/storage/wallet01.json
+          wallet_password: ""
+          volume_name: storage_storage_s01
+          endpoint_data0: s01.frostfs.devenv:8080
+          control_endpoint: s01.frostfs.devenv:8081
+          un_locode: "RU MOW"
+      - name: frostfs-storage_02
+        attributes:
+          container_name: s02
+          config_path: /etc/frostfs/storage/config.yml
+          wallet_path: ../frostfs-dev-env/services/storage/wallet02.json
+          local_wallet_config_path: ./TemporaryDir/empty-password.yml
+          local_wallet_path: ../frostfs-dev-env/services/storage/wallet02.json
+          wallet_password: ""
+          volume_name: storage_storage_s02
+          endpoint_data0: s02.frostfs.devenv:8080
+          control_endpoint: s02.frostfs.devenv:8081
+          un_locode: "RU LED"
+      - name: frostfs-storage_03
+        attributes:
+          container_name: s03
+          config_path: /etc/frostfs/storage/config.yml
+          wallet_path: ../frostfs-dev-env/services/storage/wallet03.json
+          local_wallet_config_path: ./TemporaryDir/empty-password.yml
+          local_wallet_path: ../frostfs-dev-env/services/storage/wallet03.json
+          wallet_password: ""
+          volume_name: storage_storage_s03
+          endpoint_data0: s03.frostfs.devenv:8080
+          control_endpoint: s03.frostfs.devenv:8081
+          un_locode: "SE STO"
+      - name: frostfs-storage_04
+        attributes:
+          container_name: s04
+          config_path: /etc/frostfs/storage/config.yml
+          wallet_path: ../frostfs-dev-env/services/storage/wallet04.json
+          local_wallet_config_path: ./TemporaryDir/empty-password.yml
+          local_wallet_path: ../frostfs-dev-env/services/storage/wallet04.json
+          wallet_password: ""
+          volume_name: storage_storage_s04
+          endpoint_data0: s04.frostfs.devenv:8080
+          control_endpoint: s04.frostfs.devenv:8081
+          un_locode: "FI HEL"
+      - name: frostfs-s3_01
+        attributes:
+          container_name: s3_gate
+          config_path: ../frostfs-dev-env/services/s3_gate/.s3.env
+          wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json
+          local_wallet_config_path: ./TemporaryDir/password-s3.yml
+          local_wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json
+          wallet_password: "s3"
+          endpoint_data0: https://s3.frostfs.devenv:8080
+      - name: frostfs-http_01
+        attributes:
+          container_name: http_gate
+          config_path: ../frostfs-dev-env/services/http_gate/.http.env
+          wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json
+          local_wallet_config_path: ./TemporaryDir/password-other.yml
+          local_wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json
+          wallet_password: "one"
+          endpoint_data0: http://http.frostfs.devenv
+      - name: frostfs-ir_01
+        attributes:
+          container_name: ir01
+          config_path: ../frostfs-dev-env/services/ir/.ir.env
+          wallet_path: ../frostfs-dev-env/services/ir/az.json
+          local_wallet_config_path: ./TemporaryDir/password-other.yml
+          local_wallet_path: ../frostfs-dev-env/services/ir/az.json
+          wallet_password: "one"
+      - name: neo-go_01
+        attributes:
+          container_name: morph_chain
+          config_path: ../frostfs-dev-env/services/morph_chain/protocol.privnet.yml
+          wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json
+          local_wallet_config_path: ./TemporaryDir/password-other.yml
+          local_wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json
+          wallet_password: "one"
+          endpoint_internal0: http://morph-chain.frostfs.devenv:30333
+      - name: main-chain_01
+        attributes:
+          container_name: main_chain
+          config_path: ../frostfs-dev-env/services/chain/protocol.privnet.yml
+          wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json
+          local_wallet_config_path: ./TemporaryDir/password-other.yml
+          local_wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json
+          wallet_password: "one"
+          endpoint_internal0: http://main-chain.frostfs.devenv:30333
+      - name: coredns_01
+        attributes:
+          container_name: coredns
+clis:
+  - name: frostfs-cli
+    exec_path: frostfs-cli
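For orientation: each host entry maps dev-env containers onto testlib service descriptors, and the per-service `attributes` blocks carry the wiring (wallets, data/control endpoints, UN/LOCODEs used by placement-policy tests). A minimal sketch of consuming this file with PyYAML (pinned to 6.0.1 later in this diff); only the keys visible in the file above are assumed, everything else is illustrative:

```python
import yaml  # pyyaml, pinned as pyyaml==6.0.1 in this same changeset

with open(".devenv.hosting.yaml") as f:
    config = yaml.safe_load(f)

# Walk hosts -> services and print each service's container and endpoint, if any.
for host in config["hosts"]:
    for service in host["services"]:
        attrs = service["attributes"]
        endpoint = attrs.get("endpoint_data0") or attrs.get("endpoint_internal0")
        print(service["name"], attrs["container_name"], endpoint)
```

Note that the host block declares `attributes` twice; PyYAML's `safe_load` does not reject duplicate mapping keys, the later mapping simply wins.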
.forgejo/workflows/dco.yml (new file, +21)

@@ -0,0 +1,21 @@
+name: DCO action
+on: [pull_request]
+
+jobs:
+  dco:
+    name: DCO
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Setup Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: '1.21'
+
+      - name: Run commit format checker
+        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
+        with:
+          from: 'origin/${{ github.event.pull_request.base.ref }}'
.github/CODEOWNERS (deleted file, -1)

@@ -1 +0,0 @@
-* @aprasolova @vdomnich-yadro @dansingjulia @yadro-vavdeev @abereziny
.github/workflows/dco.yml (deleted file, -21)

@@ -1,21 +0,0 @@
-name: DCO check
-
-on:
-  pull_request:
-    branches:
-      - master
-
-jobs:
-  commits_check_job:
-    runs-on: ubuntu-latest
-    name: Commits Check
-    steps:
-      - name: Get PR Commits
-        id: 'get-pr-commits'
-        uses: tim-actions/get-pr-commits@master
-        with:
-          token: ${{ secrets.GITHUB_TOKEN }}
-      - name: DCO Check
-        uses: tim-actions/dco@master
-        with:
-          commits: ${{ steps.get-pr-commits.outputs.commits }}
.gitignore (+2)

@@ -1,6 +1,7 @@
 # ignore IDE files
 .vscode
 .idea
+venv.*
 
 # ignore temp files under any path
 .DS_Store
@@ -10,3 +11,4 @@
 /dist
 /build
 *.egg-info
+wallet_config.yml
CODEOWNERS (new file, +1)

@@ -0,0 +1 @@
+* @JuliaKovshova @abereziny @d.zayakin @anikeev-yadro @anurindm @ylukoyan @i.niyazov
CONTRIBUTING.md (modified; the file header was lost in extraction, the name follows from the hunk content)

@@ -3,8 +3,8 @@
 First, thank you for contributing! We love and encourage pull requests from
 everyone. Please follow the guidelines:
 
-- Check the open [issues](https://github.com/TrueCloudLab/frostfs-testlib/issues) and
-  [pull requests](https://github.com/TrueCloudLab/frostfs-testlib/pulls) for existing
+- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/issues) and
+  [pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/pulls) for existing
   discussions.
 
 - Open an issue first, to discuss a new feature or enhancement.
@@ -26,8 +26,8 @@ Start by forking the `frostfs-testlib` repository, make changes in a branch and
 send a pull request. We encourage pull requests to discuss code changes. Here
 are the steps in details:
 
-### Set up your GitHub Repository
-Fork [FrostFS testlib upstream](https://github.com/TrueCloudLab/frostfs-testlib/fork) source
+### Set up your Git Repository
+Fork [FrostFS testlib upstream](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/forks) source
 repository to your own personal repository. Copy the URL of your fork and clone it:
 
 ```shell
@@ -37,7 +37,7 @@ $ git clone <url of your fork>
 ### Set up git remote as ``upstream``
 ```shell
 $ cd frostfs-testlib
-$ git remote add upstream https://github.com/TrueCloudLab/frostfs-testlib
+$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-testlib
 $ git fetch upstream
 ```
 
@@ -63,9 +63,9 @@ $ git checkout -b feature/123-something_awesome
 ```
 
 ### Test your changes
-Before submitting any changes to the library, please, make sure that all unit tests are passing. To run the tests, please, use the following command:
+Before submitting any changes to the library, please, make sure that linter and all unit tests are passing. To run the tests, please, use the following command:
 ```shell
-$ python -m unittest discover --start-directory tests
+$ make validation
 ```
 
 To enable tests that interact with SSH server, please, setup SSH server and set the following environment variables before running the tests:
@@ -99,8 +99,8 @@ $ git push origin feature/123-something_awesome
 ```
 
 ### Create a Pull Request
-Pull requests can be created via GitHub. Refer to [this
-document](https://help.github.com/articles/creating-a-pull-request/) for
+Pull requests can be created via Git. Refer to [this
+document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for
 detailed steps on how to create a pull request. After a Pull Request gets peer
 reviewed and approved, it will be merged.
Makefile (41 lines changed)

@@ -1,8 +1,11 @@
 SHELL := /bin/bash
 PYTHON_VERSION := 3.10
-VENV_DIR := venv.frostfs-testlib
+VENV_NAME := frostfs-testlib
+VENV_DIR := venv.${VENV_NAME}
 
 current_dir := $(shell pwd)
+DIRECTORIES := $(sort $(dir $(wildcard ../frostfs-testlib-plugin-*/ ../*-testcases/)))
+FROM_VENV := . ${VENV_DIR}/bin/activate &&
 
 venv: create requirements paths precommit
 	@echo Ready
@@ -13,15 +16,35 @@ precommit:
 
 paths:
 	@echo Append paths for project
-	@echo Virtual environment: ${VENV_DIR}
-	@sudo rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
-	@sudo touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
-	@echo ${current_dir}/src/frostfs_testlib | sudo tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
+	@echo Virtual environment: ${current_dir}/${VENV_DIR}
+	@rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
+	@touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
+	@echo ${current_dir}/src | tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
 
-create:
-	@echo Create virtual environment for
-	virtualenv --python=python${PYTHON_VERSION} --prompt=frostfs-testlib ${VENV_DIR}
+create: ${VENV_DIR}
+
+${VENV_DIR}:
+	@echo Create virtual environment ${current_dir}/${VENV_DIR}
+	virtualenv --python=python${PYTHON_VERSION} --prompt=${VENV_NAME} ${VENV_DIR}
 
 requirements:
 	@echo Isntalling pip requirements
 	. ${VENV_DIR}/bin/activate && pip install -Ur requirements.txt
+
+#### VALIDATION SECTION ####
+lint: create requirements
+	${FROM_VENV} pylint --disable R,C,W ./src
+
+unit_test:
+	@echo Starting unit tests
+	${FROM_VENV} python -m pytest tests
+
+.PHONY: lint_dependent $(DIRECTORIES)
+lint_dependent: $(DIRECTORIES)
+
+$(DIRECTORIES):
+	@echo checking dependent repo $@
+	$(MAKE) validation -C $@
+
+validation: lint unit_test lint_dependent
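The `paths` target drops a `_paths.pth` file into the venv's site-packages; Python's site machinery reads each line of a `.pth` file at interpreter startup and appends it to `sys.path`, which is what lets `import frostfs_testlib` resolve from the working tree's `src/` without an editable install. A quick check, run inside the activated venv (illustrative, not part of the repo):

```python
# Verify the _paths.pth hook: the repo's src/ dir should be on sys.path,
# so the working-tree package imports without pip-installing it.
import sys

assert any(p.rstrip("/").endswith("/src") for p in sys.path), "make paths not applied?"

import frostfs_testlib  # resolved from <repo>/src thanks to _paths.pth
print(frostfs_testlib.__version__)
```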
README.md (modified; the file header was lost in extraction)

@@ -92,4 +92,4 @@ The library provides the following primary components:
 
 
 ## Contributing
-Any contributions to the library should conform to the [contribution guideline](https://github.com/TrueCloudLab/frostfs-testlib/blob/master/CONTRIBUTING.md).
+Any contributions to the library should conform to the [contribution guideline](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/src/branch/master/CONTRIBUTING.md).
pyproject.toml (modified)

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "frostfs-testlib"
-version = "2.0.0"
+version = "2.0.1"
 description = "Building blocks and utilities to facilitate development of automated tests for FrostFS system"
 readme = "README.md"
 authors = [{ name = "Yadro", email = "info@yadro.com" }]
@@ -16,19 +16,19 @@ classifiers = [
 ]
 keywords = ["frostfs", "test"]
 dependencies = [
-    "allure-python-commons>=2.9.45",
+    "allure-python-commons>=2.13.2",
     "docker>=4.4.0",
-    "importlib_metadata>=5.0; python_version < '3.10'",
+    "pyyaml==6.0.1",
     "neo-mamba==1.0.0",
     "paramiko>=2.10.3",
     "pexpect>=4.8.0",
-    "requests>=2.28.0",
+    "requests==2.28.1",
     "docstring_parser>=0.15",
     "testrail-api>=1.12.0",
     "pytest==7.1.2",
     "tenacity==8.0.1",
-    "boto3==1.16.33",
-    "boto3-stubs[essential]==1.16.33",
+    "boto3==1.35.30",
+    "boto3-stubs[essential]==1.35.30",
 ]
 requires-python = ">=3.10"
 
@@ -36,7 +36,7 @@ requires-python = ">=3.10"
 dev = ["black", "bumpver", "isort", "pre-commit"]
 
 [project.urls]
-Homepage = "https://github.com/TrueCloudLab/frostfs-testlib"
+Homepage = "https://git.frostfs.info/TrueCloudLab/frostfs-testlib"
 
 [project.entry-points."frostfs.testlib.reporter"]
 allure = "frostfs_testlib.reporter.allure_handler:AllureHandler"
@@ -44,17 +44,37 @@ allure = "frostfs_testlib.reporter.allure_handler:AllureHandler"
 [project.entry-points."frostfs.testlib.hosting"]
 docker = "frostfs_testlib.hosting.docker_host:DockerHost"
 
+[project.entry-points."frostfs.testlib.healthcheck"]
+basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck"
+
+[project.entry-points."frostfs.testlib.csc_managers"]
+config = "frostfs_testlib.storage.controllers.state_managers.config_state_manager:ConfigStateManager"
+
+[project.entry-points."frostfs.testlib.services"]
+frostfs-storage = "frostfs_testlib.storage.dataclasses.frostfs_services:StorageNode"
+frostfs-s3 = "frostfs_testlib.storage.dataclasses.frostfs_services:S3Gate"
+frostfs-http = "frostfs_testlib.storage.dataclasses.frostfs_services:HTTPGate"
+neo-go = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain"
+frostfs-ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing"
+
+[project.entry-points."frostfs.testlib.credentials_providers"]
+authmate = "frostfs_testlib.credentials.authmate_s3_provider:AuthmateS3CredentialsProvider"
+wallet_factory = "frostfs_testlib.credentials.wallet_factory_provider:WalletFactoryProvider"
+
+[project.entry-points."frostfs.testlib.bucket_cid_resolver"]
+frostfs = "frostfs_testlib.s3.curl_bucket_resolver:CurlBucketContainerResolver"
+
 [tool.isort]
 profile = "black"
 src_paths = ["src", "tests"]
-line_length = 100
+line_length = 140
 
 [tool.black]
-line-length = 100
+line-length = 140
 target-version = ["py310"]
 
 [tool.bumpver]
-current_version = "2.0.0"
+current_version = "2.0.1"
 version_pattern = "MAJOR.MINOR.PATCH"
 commit_message = "Bump version {old_version} -> {new_version}"
 commit = true
@@ -64,3 +84,12 @@ push = false
 [tool.bumpver.file_patterns]
 "pyproject.toml" = ['current_version = "{version}"', 'version = "{version}"']
 "src/frostfs_testlib/__init__.py" = ["{version}"]
+
+[tool.pytest.ini_options]
+filterwarnings = [
+    "ignore:Blowfish has been deprecated:cryptography.utils.CryptographyDeprecationWarning",
+]
+testpaths = ["tests"]
+
+[project.entry-points.pytest11]
+testlib = "frostfs_testlib"
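The new entry-point groups are how the testlib discovers pluggable services, healthchecks, and credential providers at runtime, and the `pytest11` group registers the package itself as a pytest plugin (which is why `__init__.py` below now imports fixtures and hooks). A sketch of the discovery side using the standard-library `importlib.metadata`; the group names come straight from the table above:

```python
# Enumerate plugins registered under one of the groups declared above.
# entry_points(group=...) is the Python 3.10+ keyword API, matching
# requires-python = ">=3.10" in this same file.
from importlib.metadata import entry_points

for ep in entry_points(group="frostfs.testlib.services"):
    service_cls = ep.load()  # e.g. StorageNode, S3Gate, MorphChain, ...
    print(ep.name, "->", service_cls)
```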
requirements.txt (modified)

@@ -1,6 +1,5 @@
-allure-python-commons==2.9.45
+allure-python-commons==2.13.2
 docker==4.4.0
-importlib_metadata==5.0.0
 neo-mamba==1.0.0
 paramiko==2.10.3
 pexpect==4.8.0
@@ -9,14 +8,15 @@ docstring_parser==0.15
 testrail-api==1.12.0
 tenacity==8.0.1
 pytest==7.1.2
-boto3==1.16.33
-boto3-stubs[essential]==1.16.33
+boto3==1.35.30
+boto3-stubs[essential]==1.35.30
 
 # Dev dependencies
 black==22.8.0
 bumpver==2022.1118
 isort==5.12.0
 pre-commit==2.20.0
+pylint==2.17.4
 
 # Packaging dependencies
 build==0.8.0
src/frostfs_testlib/__init__.py (modified)

@@ -1 +1,4 @@
-__version__ = "2.0.0"
+__version__ = "2.0.1"
+
+from .fixtures import configure_testlib, hosting, temp_directory
+from .hooks import pytest_collection_modifyitems
src/frostfs_testlib/analytics/__init__.py (modified)

@@ -1,5 +1,5 @@
 from frostfs_testlib.analytics import test_case
 from frostfs_testlib.analytics.test_case import TestCasePriority
 from frostfs_testlib.analytics.test_collector import TestCase, TestCaseCollector
-from frostfs_testlib.analytics.test_exporter import TestExporter
+from frostfs_testlib.analytics.test_exporter import TСExporter
 from frostfs_testlib.analytics.testrail_exporter import TestrailExporter

(The replacement name TСExporter is spelled with a Cyrillic "С" in the source; the "# TODO: REMOVE ME" marker on the class itself, further down in this diff, flags it as transitional.)
src/frostfs_testlib/analytics/test_collector.py (modified; the file header was lost in extraction)

@@ -6,6 +6,7 @@ from docstring_parser.google import DEFAULT_SECTIONS, Section, SectionType
 
 DEFAULT_SECTIONS.append(Section("Steps", "steps", SectionType.MULTIPLE))
 
+
 class TestCase:
     """
     Test case object implementation for use in collector and exporters
@@ -106,7 +107,9 @@ class TestCaseCollector:
         # Read test_case suite and section name from test class if possible and get test function from class
         if test.cls:
             suite_name = test.cls.__dict__.get("__test_case_suite_name__", suite_name)
-            suite_section_name = test.cls.__dict__.get("__test_case_suite_section__", suite_section_name)
+            suite_section_name = test.cls.__dict__.get(
+                "__test_case_suite_section__", suite_section_name
+            )
             test_function = test.cls.__dict__[test.originalname]
         else:
             # If no test class, read test function from module
@@ -117,7 +120,9 @@ class TestCaseCollector:
         test_case_title = test_function.__dict__.get("__test_case_title__", None)
         test_case_priority = test_function.__dict__.get("__test_case_priority__", None)
         suite_name = test_function.__dict__.get("__test_case_suite_name__", suite_name)
-        suite_section_name = test_function.__dict__.get("__test_case_suite_section__", suite_section_name)
+        suite_section_name = test_function.__dict__.get(
+            "__test_case_suite_section__", suite_section_name
+        )
 
         # Parce test_steps if they define in __doc__
         doc_string = parse(test_function.__doc__, style=DocstringStyle.GOOGLE)
@@ -125,7 +130,9 @@ class TestCaseCollector:
         if doc_string.short_description:
             test_case_description = doc_string.short_description
         if doc_string.long_description:
-            test_case_description = f"{doc_string.short_description}\r\n{doc_string.long_description}"
+            test_case_description = (
+                f"{doc_string.short_description}\r\n{doc_string.long_description}"
+            )
 
         if doc_string.meta:
             for meta in doc_string.meta:
@@ -140,25 +147,27 @@ class TestCaseCollector:
             test_case_params = test_case_call_spec.id
         # Format title with params
         if test_case_title:
-            test_case_title = self.__format_string_with_params__(test_case_title,test_case_call_spec.params)
+            test_case_title = self.__format_string_with_params__(
+                test_case_title, test_case_call_spec.params
+            )
         # Format steps with params
         if test_case_steps:
             for key, value in test_case_steps.items():
-                value = self.__format_string_with_params__(value,test_case_call_spec.params)
+                value = self.__format_string_with_params__(value, test_case_call_spec.params)
                 test_case_steps[key] = value
 
         # If there is set basic test case attributes create TestCase and return
         if test_case_id and test_case_title and suite_name and suite_name:
             test_case = TestCase(
-                id=test_case_id,
+                uuid_id=test_case_id,
                 title=test_case_title,
                 description=test_case_description,
                 priority=test_case_priority,
                 steps=test_case_steps,
                 params=test_case_params,
                 suite_name=suite_name,
                 suite_section_name=suite_section_name,
             )
             return test_case
         # Return None if there is no enough information for return test case
         return None
@@ -187,4 +196,4 @@ class TestCaseCollector:
             test_case = self.__get_test_case_from_pytest_test__(test)
             if test_case:
                 test_cases.append(test_case)
         return test_cases
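The collector's docstring handling rests on one trick visible at the top of this diff: it registers a custom "Steps" section with `docstring_parser` and then reads titles and steps out of Google-style docstrings. A small self-contained illustration of that mechanism; the registration line mirrors the source, while the sample docstring is invented for the example:

```python
from docstring_parser import DocstringStyle, parse
from docstring_parser.google import DEFAULT_SECTIONS, Section, SectionType

# Same registration the collector performs: teach the Google-style parser
# to recognise a "Steps" section with multiple named entries.
DEFAULT_SECTIONS.append(Section("Steps", "steps", SectionType.MULTIPLE))

doc = """Check object replication.

Steps:
    1: Put an object
    2: Wait for replication
"""

parsed = parse(doc, style=DocstringStyle.GOOGLE)
print(parsed.short_description)  # "Check object replication."
for meta in parsed.meta:         # each step arrives as a meta entry, e.g. args ['steps', '1']
    print(meta.args, meta.description)
```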
src/frostfs_testlib/analytics/test_exporter.py (modified; the file header was lost in extraction)

@@ -3,7 +3,8 @@ from abc import ABC, abstractmethod
 from frostfs_testlib.analytics.test_collector import TestCase
 
 
-class TestExporter(ABC):
+# TODO: REMOVE ME
+class TСExporter(ABC):
     test_cases_cache = []
     test_suites_cache = []
 
@@ -46,9 +47,7 @@ class TСExporter(ABC):
         """
 
     @abstractmethod
-    def update_test_case(
-        self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section
-    ) -> None:
+    def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None:
         """
         Update test case in TMS
         """
@@ -60,13 +59,11 @@ class TСExporter(ABC):
 
         for test_case in test_cases:
             test_suite = self.get_or_create_test_suite(test_case.suite_name)
-            test_section = self.get_or_create_suite_section(
-                test_suite, test_case.suite_section_name
-            )
+            test_section = self.get_or_create_suite_section(test_suite, test_case.suite_section_name)
             test_case_in_tms = self.search_test_case_id(test_case.id)
             steps = [{"content": value, "expected": " "} for key, value in test_case.steps.items()]
 
             if test_case_in_tms:
-                self.update_test_case(test_case, test_case_in_tms)
+                self.update_test_case(test_case, test_case_in_tms, test_suite, test_section)
             else:
-                self.create_test_case(test_case)
+                self.create_test_case(test_case, test_suite, test_section)
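To make the base-class change concrete, here is a minimal sketch of a TMS-agnostic subclass. It implements only the methods visible in this diff; the real abstract base may require additional overrides, and the method signatures beyond `update_test_case` and `create_test_case` are assumptions inferred from the call sites above:

```python
# A hypothetical stdout-backed exporter sketch, not part of the library.
from frostfs_testlib.analytics.test_collector import TestCase
from frostfs_testlib.analytics.test_exporter import TСExporter  # note: Cyrillic "С"


class StdoutExporter(TСExporter):
    def get_or_create_test_suite(self, test_suite_name):
        return {"name": test_suite_name}

    def get_or_create_suite_section(self, test_rail_suite, section_name):
        return {"suite": test_rail_suite["name"], "name": section_name}

    def search_test_case_id(self, test_case_id: str) -> object:
        return None  # pretend nothing exists in the TMS yet

    def create_test_case(self, test_case: TestCase, test_suite, test_suite_section) -> None:
        print(f"create: {test_case.title}")

    def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None:
        print(f"update: {test_case.title}")
```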
src/frostfs_testlib/analytics/testrail_exporter.py (modified; the file header was lost in extraction)

@@ -1,10 +1,10 @@
 from testrail_api import TestRailAPI
 
 from frostfs_testlib.analytics.test_collector import TestCase
-from frostfs_testlib.analytics.test_exporter import TestExporter
+from frostfs_testlib.analytics.test_exporter import TСExporter
 
 
-class TestrailExporter(TestExporter):
+class TestrailExporter(TСExporter):
     def __init__(
         self,
         tr_url: str,
@@ -62,19 +62,13 @@ class TestrailExporter(TСExporter):
         It's help do not call TMS each time then we search test case
         """
         for test_suite in self.test_suites_cache:
-            self.test_cases_cache.extend(
-                self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"])
-            )
+            self.test_cases_cache.extend(self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"]))
 
     def search_test_case_id(self, test_case_id: str) -> object:
         """
         Find test cases in TestRail (cache) by ID
         """
-        test_cases = [
-            test_case
-            for test_case in self.test_cases_cache
-            if test_case["custom_autotest_name"] == test_case_id
-        ]
+        test_cases = [test_case for test_case in self.test_cases_cache if test_case["custom_autotest_name"] == test_case_id]
 
         if len(test_cases) > 1:
             raise RuntimeError(f"Too many results found in test rail for id {test_case_id}")
@@ -87,9 +81,7 @@ class TestrailExporter(TСExporter):
         """
         Get suite name with exact name from Testrail or create if not exist
         """
-        test_rail_suites = [
-            suite for suite in self.test_suites_cache if suite["name"] == test_suite_name
-        ]
+        test_rail_suites = [suite for suite in self.test_suites_cache if suite["name"] == test_suite_name]
 
         if not test_rail_suites:
             test_rail_suite = self.api.suites.add_suite(
@@ -102,17 +94,13 @@ class TestrailExporter(TСExporter):
         elif len(test_rail_suites) == 1:
             return test_rail_suites.pop()
         else:
-            raise RuntimeError(
-                f"Too many results found in test rail for suite name {test_suite_name}"
-            )
+            raise RuntimeError(f"Too many results found in test rail for suite name {test_suite_name}")
 
     def get_or_create_suite_section(self, test_rail_suite, section_name) -> object:
         """
         Get suite section with exact name from Testrail or create new one if not exist
         """
-        test_rail_sections = [
-            section for section in test_rail_suite["sections"] if section["name"] == section_name
-        ]
+        test_rail_sections = [section for section in test_rail_suite["sections"] if section["name"] == section_name]
 
         if not test_rail_sections:
             test_rail_section = self.api.sections.add_section(
@@ -128,9 +116,7 @@ class TestrailExporter(TСExporter):
         elif len(test_rail_sections) == 1:
             return test_rail_sections.pop()
         else:
-            raise RuntimeError(
-                f"Too many results found in test rail for section name {section_name}"
-            )
+            raise RuntimeError(f"Too many results found in test rail for section name {section_name}")
 
     def prepare_request_body(self, test_case: TestCase, test_suite, test_suite_section) -> dict:
         """
@@ -164,9 +150,7 @@ class TestrailExporter(TСExporter):
 
         self.api.cases.add_case(**request_body)
 
-    def update_test_case(
-        self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section
-    ) -> None:
+    def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None:
         """
         Update test case in Testrail
         """
src/frostfs_testlib/cli/__init__.py (modified; the file header was lost in extraction)

@@ -1,4 +1,5 @@
 from frostfs_testlib.cli.frostfs_adm import FrostfsAdm
 from frostfs_testlib.cli.frostfs_authmate import FrostfsAuthmate
 from frostfs_testlib.cli.frostfs_cli import FrostfsCli
+from frostfs_testlib.cli.generic_cli import GenericCli
 from frostfs_testlib.cli.neogo import NeoGo, NetworkType
src/frostfs_testlib/cli/frostfs_adm/morph.py (modified; the file header was lost in extraction)

@@ -27,11 +27,7 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph deposit-notary",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )
 
     def dump_balances(
@@ -56,11 +52,7 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph dump-balances",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )
 
     def dump_config(self, rpc_endpoint: str) -> CommandResult:
@@ -74,11 +66,23 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph dump-config",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )
 
+    def set_config(self, set_key_value: str, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None) -> CommandResult:
+        """Add/update global config value in the FrostFS network.
+
+        Args:
+            set_key_value: key1=val1 [key2=val2 ...]
+            alphabet_wallets: Path to alphabet wallets dir
+            rpc_endpoint: N3 RPC node endpoint
+
+        Returns:
+            Command's result.
+        """
+        return self._execute(
+            f"morph set-config {set_key_value}",
+            **{param: param_value for param, param_value in locals().items() if param not in ["self", "set_key_value"]},
+        )
+
     def dump_containers(
@@ -101,14 +105,10 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph dump-containers",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )
 
-    def dump_hashes(self, rpc_endpoint: str) -> CommandResult:
+    def dump_hashes(self, rpc_endpoint: str, domain: Optional[str] = None) -> CommandResult:
         """Dump deployed contract hashes.
 
         Args:
@@ -119,15 +119,11 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph dump-hashes",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )
 
     def force_new_epoch(
-        self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None
+        self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None, delta: Optional[int] = None
     ) -> CommandResult:
         """Create new FrostFS epoch event in the side chain.
 
@@ -140,11 +136,7 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph force-new-epoch",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )
 
     def generate_alphabet(
@@ -165,11 +157,7 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph generate-alphabet",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )
 
     def generate_storage_wallet(
@@ -192,11 +180,7 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph generate-storage-wallet",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )
 
     def init(
@@ -219,7 +203,7 @@ class FrostfsAdmMorph(CliCommand):
             container_alias_fee: Container alias fee (default 500).
             container_fee: Container registration fee (default 1000).
             contracts: Path to archive with compiled FrostFS contracts
-                (default fetched from latest github release).
+                (default fetched from latest git release).
             epoch_duration: Amount of side chain blocks in one FrostFS epoch (default 240).
             homomorphic_disabled: Disable object homomorphic hashing.
             local_dump: Path to the blocks dump file.
@@ -232,11 +216,7 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph init",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )
 
     def refill_gas(
@@ -259,11 +239,7 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph refill-gas",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
        )
 
     def restore_containers(
@@ -286,11 +262,7 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph restore-containers",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )
 
     def set_policy(
@@ -340,7 +312,7 @@ class FrostfsAdmMorph(CliCommand):
         Args:
             alphabet_wallets: Path to alphabet wallets dir.
             contracts: Path to archive with compiled FrostFS contracts
-                (default fetched from latest github release).
+                (default fetched from latest git release).
             rpc_endpoint: N3 RPC node endpoint.
 
         Returns:
@@ -348,17 +320,13 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph update-contracts",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )
 
     def remove_nodes(
         self, node_netmap_keys: list[str], rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None
     ) -> CommandResult:
-        """ Move node to the Offline state in the candidates list
+        """Move node to the Offline state in the candidates list
         and tick an epoch to update the netmap using frostfs-adm
 
         Args:
@@ -371,12 +339,127 @@ class FrostfsAdmMorph(CliCommand):
         """
         if not len(node_netmap_keys):
             raise AttributeError("Got empty node_netmap_keys list")
 
         return self._execute(
             f"morph remove-nodes {' '.join(node_netmap_keys)}",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self", "node_netmap_keys"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]},
         )
+
+    def add_rule(
+        self,
+        chain_id: str,
+        target_name: str,
+        target_type: str,
+        rule: Optional[list[str]] = None,
+        path: Optional[str] = None,
+        chain_id_hex: Optional[bool] = None,
+        chain_name: Optional[str] = None,
+        wallet: Optional[str] = None,
+        address: Optional[str] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """Drop objects from the node's local storage
+
+        Args:
+            chain-id: Assign ID to the parsed chain
+            chain-id-hex: Flag to parse chain ID as hex
+            path: Path to encoded chain in JSON or binary format
+            rule: Rule statement
+            target-name: Resource name in APE resource name format
+            target-type: Resource type(container/namespace)
+            timeout: Timeout for an operation (default 15s)
+            wallet: Path to the wallet or binary key
+
+        Returns:
+            Command`s result.
+        """
+        return self._execute(
+            "morph ape add-rule-chain",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
+
+    def get_rule(
+        self,
+        chain_id: str,
+        target_name: str,
+        target_type: str,
+        chain_id_hex: Optional[bool] = None,
+        chain_name: Optional[str] = None,
+        wallet: Optional[str] = None,
+        address: Optional[str] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """Drop objects from the node's local storage
+
+        Args:
+            chain-id string Chain id
+            chain-id-hex Flag to parse chain ID as hex
+            target-name string Resource name in APE resource name format
+            target-type string Resource type(container/namespace)
+            timeout duration Timeout for an operation (default 15s)
+            wallet string Path to the wallet or binary key
+
+        Returns:
+            Command`s result.
+        """
+        return self._execute(
+            "morph ape get-rule-chain",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
+
+    def list_rules(
+        self,
+        target_type: str,
+        target_name: Optional[str] = None,
+        rpc_endpoint: Optional[str] = None,
+        chain_name: Optional[str] = None,
+        wallet: Optional[str] = None,
+        address: Optional[str] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """Drop objects from the node's local storage
+
+        Args:
+            target-name: Resource name in APE resource name format
+            target-type: Resource type(container/namespace)
+            timeout: Timeout for an operation (default 15s)
+            wallet: Path to the wallet or binary key
+
+        Returns:
+            Command`s result.
+        """
+        return self._execute(
+            "morph ape list-rule-chains",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
+
+    def remove_rule(
+        self,
+        chain_id: str,
+        target_name: str,
+        target_type: str,
+        all: Optional[bool] = None,
+        chain_name: Optional[str] = None,
+        chain_id_hex: Optional[bool] = None,
+        wallet: Optional[str] = None,
+        address: Optional[str] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """Drop objects from the node's local storage
+
+        Args:
+            all: Remove all chains
+            chain-id: Assign ID to the parsed chain
+            chain-id-hex: Flag to parse chain ID as hex
+            target-name: Resource name in APE resource name format
+            target-type: Resource type(container/namespace)
+            timeout: Timeout for an operation (default 15s)
+            wallet: Path to the wallet or binary key
+
+        Returns:
+            Command`s result.
+        """
+        return self._execute(
+            "morph ape rm-rule-chain",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
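The dict comprehension that this file now collapses onto one line is the wrapper idiom used throughout: every named parameter of the method is forwarded via `locals()`, so adding a CLI flag only requires adding a parameter. A standalone sketch of the idiom; the helper names here are illustrative, not the library's own:

```python
# Illustration of the locals()-forwarding idiom used throughout morph.py:
# each named parameter (minus the exclusions) becomes a CLI flag.
from typing import Optional


def build_command(base: str, **params) -> str:
    flags = " ".join(f"--{name.replace('_', '-')} {value}" for name, value in params.items() if value is not None)
    return f"{base} {flags}".strip()


def force_new_epoch(rpc_endpoint: Optional[str] = None, delta: Optional[int] = None) -> str:
    # locals() at this point contains exactly the function's parameters,
    # so the comprehension forwards them all without repeating their names.
    return build_command(
        "frostfs-adm morph force-new-epoch",
        **{param: value for param, value in locals().items() if param not in ["self"]},
    )


print(force_new_epoch(rpc_endpoint="http://morph-chain.frostfs.devenv:30333", delta=1))
# frostfs-adm morph force-new-epoch --rpc-endpoint http://morph-chain.frostfs.devenv:30333 --delta 1
```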
src/frostfs_testlib/cli/frostfs_authmate/__init__.py (modified; the file header was lost in extraction)

@@ -6,8 +6,8 @@ from frostfs_testlib.shell import Shell
 
 
 class FrostfsAuthmate:
-    secret: Optional[FrostfsAuthmateSecret] = None
-    version: Optional[FrostfsAuthmateVersion] = None
+    secret: FrostfsAuthmateSecret
+    version: FrostfsAuthmateVersion
 
     def __init__(self, shell: Shell, frostfs_authmate_exec_path: str):
        self.secret = FrostfsAuthmateSecret(shell, frostfs_authmate_exec_path)
src/frostfs_testlib/cli/frostfs_authmate/secret.py (modified; the file header was lost in extraction, the name follows from the hunk context)

@@ -44,7 +44,6 @@ class FrostfsAuthmateSecret(CliCommand):
         wallet: str,
         wallet_password: str,
         peer: str,
-        bearer_rules: str,
         gate_public_key: Union[str, list[str]],
         address: Optional[str] = None,
         container_id: Optional[str] = None,
src/frostfs_testlib/cli/frostfs_cli/acl.py (modified; the file header was lost in extraction, the name follows from the hunk context)

@@ -22,7 +22,7 @@ class FrostfsCliACL(CliCommand):
         Well-known system object headers start with '$Object:' prefix.
         User defined headers start without prefix.
         Read more about filter keys at:
-        http://github.com/TrueCloudLab/frostfs-api/blob/master/proto-docs/acl.md#message-eaclrecordfilter
+        https://git.frostfs.info/TrueCloudLab/frostfs-api/src/branch/master/proto-docs/acl.md#message-eaclrecord-filter
         Match is '=' for matching and '!=' for non-matching filter.
         Value is a valid unicode string corresponding to object or request header value.
src/frostfs_testlib/cli/frostfs_cli/ape_manager.py (new file, 70 lines)
@@ -0,0 +1,70 @@
from typing import Optional

from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult


class FrostfsCliApeManager(CliCommand):
    """Operations with APE manager."""

    def add(
        self,
        rpc_endpoint: str,
        chain_id: Optional[str] = None,
        chain_id_hex: Optional[str] = None,
        path: Optional[str] = None,
        rule: Optional[str] | Optional[list[str]] = None,
        target_name: Optional[str] = None,
        target_type: Optional[str] = None,
        wallet: Optional[str] = None,
        address: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> CommandResult:
        """Add rule chain for a target."""

        return self._execute(
            "ape-manager add",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )

    def list(
        self,
        rpc_endpoint: str,
        target_name: Optional[str] = None,
        target_type: Optional[str] = None,
        wallet: Optional[str] = None,
        address: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> CommandResult:
        """Generate APE override by target and APE chains. Util command.

        Generated APE override can be dumped to a file in JSON format that is passed to
        "create" command.
        """

        return self._execute(
            "ape-manager list",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )

    def remove(
        self,
        rpc_endpoint: str,
        chain_id: Optional[str] = None,
        chain_id_hex: Optional[str] = None,
        target_name: Optional[str] = None,
        target_type: Optional[str] = None,
        wallet: Optional[str] = None,
        address: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> CommandResult:
        """Generate APE override by target and APE chains. Util command.

        Generated APE override can be dumped to a file in JSON format that is passed to
        "create" command.
        """

        return self._execute(
            "ape-manager remove",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )
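Every method above forwards its keyword arguments to self._execute through the same locals() comprehension. The CliCommand base class is outside this diff; the snippet below is a hypothetical, simplified reconstruction of the flag rendering it is assumed to perform (drop unset values, turn snake_case parameter names into --kebab-case flags), included only to make the recurring idiom concrete.

# Hypothetical sketch of the kwargs-to-flags rendering assumed to live in
# CliCommand._execute; not part of this diff or of frostfs-testlib's public API.
def render_flags(**kwargs) -> str:
    parts = []
    for name, value in kwargs.items():
        if value is None or value is False:
            continue  # unset optionals and false booleans contribute no flag
        flag = "--" + name.replace("_", "-")
        parts.append(flag if value is True else f"{flag} '{value}'")
    return " ".join(parts)

print(render_flags(rpc_endpoint="localhost:8080", target_type="container", chain_id=None))
# --rpc-endpoint 'localhost:8080' --target-type 'container'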
src/frostfs_testlib/cli/frostfs_cli/bearer.py (new file, 54 lines)
@@ -0,0 +1,54 @@
from typing import Optional

from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult


class FrostfsCliBearer(CliCommand):
    def create(
        self,
        rpc_endpoint: str,
        out: str,
        issued_at: Optional[str] = None,
        expire_at: Optional[str] = None,
        not_valid_before: Optional[str] = None,
        ape: Optional[str] = None,
        eacl: Optional[str] = None,
        owner: Optional[str] = None,
        json: Optional[bool] = False,
        impersonate: Optional[bool] = False,
        wallet: Optional[str] = None,
        address: Optional[str] = None,
    ) -> CommandResult:
        """Create bearer token.

        All epoch flags can be specified relative to the current epoch with the +n syntax.
        In this case --rpc-endpoint flag should be specified and the epoch in bearer token
        is set to current epoch + n.
        """
        return self._execute(
            "bearer create",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )

    def generate_ape_override(
        self,
        chain_id: Optional[str] = None,
        chain_id_hex: Optional[str] = None,
        cid: Optional[str] = None,
        output: Optional[str] = None,
        path: Optional[str] = None,
        rule: Optional[str] = None,
        wallet: Optional[str] = None,
        address: Optional[str] = None,
    ) -> CommandResult:
        """Generate APE override by target and APE chains. Util command.

        Generated APE override can be dumped to a file in JSON format that is passed to
        "create" command.
        """

        return self._execute(
            "bearer generate-ape-override",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )
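The "+n" relative-epoch syntax in the create docstring invites a quick sketch. A minimal example, assuming LocalShell is importable from frostfs_testlib.shell and a frostfs-cli binary is on PATH; the endpoint, wallet path, and 100-epoch lifetime are placeholders.

from frostfs_testlib.cli.frostfs_cli.bearer import FrostfsCliBearer
from frostfs_testlib.shell import LocalShell

bearer = FrostfsCliBearer(LocalShell(), "frostfs-cli")

# "+100" is resolved against the current epoch, which is why the docstring
# requires --rpc-endpoint whenever a relative epoch is used.
bearer.create(
    rpc_endpoint="localhost:8080",
    out="bearer_token.json",
    expire_at="+100",
    impersonate=True,
    json=True,
    wallet="wallet.json",
)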
@@ -2,12 +2,16 @@ from typing import Optional

 from frostfs_testlib.cli.frostfs_cli.accounting import FrostfsCliAccounting
 from frostfs_testlib.cli.frostfs_cli.acl import FrostfsCliACL
+from frostfs_testlib.cli.frostfs_cli.ape_manager import FrostfsCliApeManager
+from frostfs_testlib.cli.frostfs_cli.bearer import FrostfsCliBearer
 from frostfs_testlib.cli.frostfs_cli.container import FrostfsCliContainer
+from frostfs_testlib.cli.frostfs_cli.control import FrostfsCliControl
 from frostfs_testlib.cli.frostfs_cli.netmap import FrostfsCliNetmap
 from frostfs_testlib.cli.frostfs_cli.object import FrostfsCliObject
 from frostfs_testlib.cli.frostfs_cli.session import FrostfsCliSession
 from frostfs_testlib.cli.frostfs_cli.shards import FrostfsCliShards
 from frostfs_testlib.cli.frostfs_cli.storagegroup import FrostfsCliStorageGroup
+from frostfs_testlib.cli.frostfs_cli.tree import FrostfsCliTree
 from frostfs_testlib.cli.frostfs_cli.util import FrostfsCliUtil
 from frostfs_testlib.cli.frostfs_cli.version import FrostfsCliVersion
 from frostfs_testlib.shell import Shell
@@ -24,6 +28,7 @@ class FrostfsCli:
     storagegroup: FrostfsCliStorageGroup
     util: FrostfsCliUtil
     version: FrostfsCliVersion
+    control: FrostfsCliControl

     def __init__(self, shell: Shell, frostfs_cli_exec_path: str, config_file: Optional[str] = None):
         self.accounting = FrostfsCliAccounting(shell, frostfs_cli_exec_path, config=config_file)
@@ -36,3 +41,7 @@ class FrostfsCli:
         self.storagegroup = FrostfsCliStorageGroup(shell, frostfs_cli_exec_path, config=config_file)
         self.util = FrostfsCliUtil(shell, frostfs_cli_exec_path, config=config_file)
         self.version = FrostfsCliVersion(shell, frostfs_cli_exec_path, config=config_file)
+        self.tree = FrostfsCliTree(shell, frostfs_cli_exec_path, config=config_file)
+        self.control = FrostfsCliControl(shell, frostfs_cli_exec_path, config=config_file)
+        self.bearer = FrostfsCliBearer(shell, frostfs_cli_exec_path, config=config_file)
+        self.ape_manager = FrostfsCliApeManager(shell, frostfs_cli_exec_path, config=config_file)
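With the imports, annotations, and constructor assignments above in place, each new subcommand group hangs off the FrostfsCli facade. A minimal usage sketch; the endpoints, file names, and config path are placeholders, and LocalShell is assumed importable from frostfs_testlib.shell.

from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.shell import LocalShell

cli = FrostfsCli(LocalShell(), "frostfs-cli", config_file="cli-config.yaml")

# Subcommand groups wired up by this change set:
cli.control.healthcheck(endpoint="localhost:8090")
cli.bearer.create(rpc_endpoint="localhost:8080", out="token.json", impersonate=True)
cli.ape_manager.list(rpc_endpoint="localhost:8080", target_type="namespace")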
@@ -8,12 +8,16 @@ class FrostfsCliContainer(CliCommand):
     def create(
         self,
         rpc_endpoint: str,
-        wallet: str,
+        wallet: Optional[str] = None,
+        nns_zone: Optional[str] = None,
+        nns_name: Optional[str] = None,
         address: Optional[str] = None,
         attributes: Optional[dict] = None,
         basic_acl: Optional[str] = None,
         await_mode: bool = False,
         disable_timestamp: bool = False,
+        force: bool = False,
+        trace: bool = False,
         name: Optional[str] = None,
         nonce: Optional[str] = None,
         policy: Optional[str] = None,
@@ -35,6 +39,8 @@ class FrostfsCliContainer(CliCommand):
             basic_acl: Hex encoded basic ACL value or keywords like 'public-read-write',
                 'private', 'eacl-public-read' (default "private").
             disable_timestamp: Disable timestamp container attribute.
+            force: Skip placement validity check.
+            trace: Generate trace ID and print it.
             name: Container name attribute.
             nonce: UUIDv4 nonce value for container.
             policy: QL-encoded or JSON-encoded placement policy or path to file with it.
@@ -45,6 +51,8 @@ class FrostfsCliContainer(CliCommand):
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             xhdr: Dict with request X-Headers.
             timeout: Timeout for the operation (default 15s).
+            nns_zone: Container nns zone attribute.
+            nns_name: Container nns name attribute.

         Returns:
             Command's result.
@@ -57,15 +65,15 @@ class FrostfsCliContainer(CliCommand):
     def delete(
         self,
         rpc_endpoint: str,
-        wallet: str,
         cid: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
         await_mode: bool = False,
         session: Optional[str] = None,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
         force: bool = False,
-        timeout: Optional[str] = None,
+        trace: bool = False,
     ) -> CommandResult:
         """
         Delete an existing container.
@@ -75,13 +83,13 @@ class FrostfsCliContainer(CliCommand):
             address: Address of wallet account.
             await_mode: Block execution until container is removed.
             cid: Container ID.
+            trace: Generate trace ID and print it.
             force: Do not check whether container contains locks and remove immediately.
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
             session: Path to a JSON-encoded container session token.
             ttl: TTL value in request meta header (default 2).
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             xhdr: Dict with request X-Headers.
-            timeout: Timeout for the operation (default 15s).

         Returns:
             Command's result.
@@ -95,12 +103,14 @@ class FrostfsCliContainer(CliCommand):
     def get(
         self,
         rpc_endpoint: str,
-        wallet: str,
         cid: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
+        generate_key: Optional[bool] = None,
         await_mode: bool = False,
         to: Optional[str] = None,
         json_mode: bool = False,
+        trace: bool = False,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
         timeout: Optional[str] = None,
@@ -113,12 +123,14 @@ class FrostfsCliContainer(CliCommand):
             await_mode: Block execution until container is removed.
             cid: Container ID.
             json_mode: Print or dump container in JSON format.
+            trace: Generate trace ID and print it.
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
             to: Path to dump encoded container.
             ttl: TTL value in request meta header (default 2).
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             xhdr: Dict with request X-Headers.
             timeout: Timeout for the operation (default 15s).
+            generate_key: Generate a new private key.

         Returns:
             Command's result.
@@ -131,9 +143,10 @@ class FrostfsCliContainer(CliCommand):
     def get_eacl(
         self,
         rpc_endpoint: str,
-        wallet: str,
         cid: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
+        generate_key: Optional[bool] = None,
         await_mode: bool = False,
         to: Optional[str] = None,
         session: Optional[str] = None,
@@ -150,11 +163,14 @@ class FrostfsCliContainer(CliCommand):
             cid: Container ID.
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
             to: Path to dump encoded container.
+            json_mode: Print or dump container in JSON format.
+            trace: Generate trace ID and print it.
             session: Path to a JSON-encoded container session token.
             ttl: TTL value in request meta header (default 2).
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             xhdr: Dict with request X-Headers.
             timeout: Timeout for the operation (default 15s).
+            generate_key: Generate a new private key.

         Returns:
             Command's result.
@@ -168,8 +184,10 @@ class FrostfsCliContainer(CliCommand):
     def list(
         self,
         rpc_endpoint: str,
-        wallet: str,
+        name: Optional[str] = None,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
+        generate_key: Optional[bool] = None,
         owner: Optional[str] = None,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
@@ -181,12 +199,15 @@ class FrostfsCliContainer(CliCommand):

         Args:
             address: Address of wallet account.
+            name: List containers by the attribute name.
             owner: Owner of containers (omit to use owner from private key).
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
             ttl: TTL value in request meta header (default 2).
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             xhdr: Dict with request X-Headers.
+            trace: Generate trace ID and print it.
             timeout: Timeout for the operation (default 15s).
+            generate_key: Generate a new private key.

         Returns:
             Command's result.
@@ -199,9 +220,12 @@ class FrostfsCliContainer(CliCommand):
     def list_objects(
         self,
         rpc_endpoint: str,
-        wallet: str,
         cid: str,
+        bearer: Optional[str] = None,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
+        generate_key: Optional[bool] = None,
+        trace: bool = False,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
         timeout: Optional[str] = None,
@@ -212,11 +236,14 @@ class FrostfsCliContainer(CliCommand):
         Args:
             address: Address of wallet account.
             cid: Container ID.
+            bearer: File with signed JSON or binary encoded bearer token.
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
             ttl: TTL value in request meta header (default 2).
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             xhdr: Dict with request X-Headers.
+            trace: Generate trace ID and print it.
             timeout: Timeout for the operation (default 15s).
+            generate_key: Generate a new private key.

         Returns:
             Command's result.
@@ -226,11 +253,12 @@ class FrostfsCliContainer(CliCommand):
             **{param: value for param, value in locals().items() if param not in ["self"]},
         )

+    # TODO Deprecated method with 0.42
     def set_eacl(
         self,
         rpc_endpoint: str,
-        wallet: str,
         cid: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
         await_mode: bool = False,
         table: Optional[str] = None,
@@ -262,3 +290,43 @@ class FrostfsCliContainer(CliCommand):
             "container set-eacl",
             **{param: value for param, value in locals().items() if param not in ["self"]},
         )
+
+    def search_node(
+        self,
+        rpc_endpoint: str,
+        cid: str,
+        wallet: Optional[str] = None,
+        address: Optional[str] = None,
+        ttl: Optional[int] = None,
+        from_file: Optional[str] = None,
+        trace: bool = False,
+        short: Optional[bool] = True,
+        xhdr: Optional[dict] = None,
+        generate_key: Optional[bool] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """
+        Show the nodes participating in the container in the current epoch.
+
+        Args:
+            rpc_endpoint: string Remote host address (as 'multiaddr' or '<host>:<port>')
+            wallet: WIF (NEP-2) string or path to the wallet or binary key.
+            cid: Container ID.
+            address: Address of wallet account.
+            ttl: TTL value in request meta header (default 2).
+            from_file: string File path with encoded container
+            timeout: duration Timeout for the operation (default 15 s)
+            short: shorten the output of node information.
+            trace: Generate trace ID and print it.
+            xhdr: Dict with request X-Headers.
+            generate_key: Generate a new private key.
+
+        Returns:
+
+        """
+        from_str = f"--from {from_file}" if from_file else ""
+
+        return self._execute(
+            f"container nodes {from_str}",
+            **{param: value for param, value in locals().items() if param not in ["self", "from_file", "from_str"]},
+        )
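Because --from is not derivable from a parameter name by the usual forwarding, search_node splices it into the command string itself and excludes from_file and from_str from the forwarded kwargs. A usage sketch against the facade defined earlier; the container ID is a placeholder.

# Continuing the facade sketch above.
cid = "SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH"  # example container ID
cli.container.search_node(rpc_endpoint="localhost:8080", cid=cid, short=True)
# Shells out to roughly:
#   frostfs-cli container nodes --rpc-endpoint 'localhost:8080' --cid '<cid>' --short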
src/frostfs_testlib/cli/frostfs_cli/control.py (new file, 232 lines)
@@ -0,0 +1,232 @@
from typing import Optional

from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult


class FrostfsCliControl(CliCommand):
    def set_status(
        self,
        endpoint: str,
        status: str,
        wallet: Optional[str] = None,
        force: Optional[bool] = None,
        address: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> CommandResult:
        """Set status of the storage node in FrostFS network map

        Args:
            wallet: Path to the wallet or binary key
            address: Address of wallet account
            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
            force: Force turning to local maintenance
            status: New netmap status keyword ('online', 'offline', 'maintenance')
            timeout: Timeout for an operation (default 15s)

        Returns:
            Command`s result.
        """
        return self._execute(
            "control set-status",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )

    def healthcheck(
        self,
        endpoint: str,
        wallet: Optional[str] = None,
        address: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> CommandResult:
        """Health check for FrostFS storage nodes

        Args:
            wallet: Path to the wallet or binary key
            address: Address of wallet account
            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
            timeout: Timeout for an operation (default 15s)

        Returns:
            Command`s result.
        """
        return self._execute(
            "control healthcheck",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )

    def drop_objects(
        self,
        endpoint: str,
        objects: str,
        wallet: Optional[str] = None,
        address: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> CommandResult:
        """Drop objects from the node's local storage

        Args:
            wallet: Path to the wallet or binary key
            address: Address of wallet account
            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
            objects: List of object addresses to be removed in string format
            timeout: Timeout for an operation (default 15s)

        Returns:
            Command`s result.
        """
        return self._execute(
            "control drop-objects",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )

    def add_rule(
        self,
        endpoint: str,
        chain_id: str,
        target_name: str,
        target_type: str,
        rule: Optional[list[str]] = None,
        path: Optional[str] = None,
        chain_id_hex: Optional[bool] = None,
        wallet: Optional[str] = None,
        address: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> CommandResult:
        """Drop objects from the node's local storage

        Args:
            address: Address of wallet account
            chain-id: Assign ID to the parsed chain
            chain-id-hex: Flag to parse chain ID as hex
            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
            path: Path to encoded chain in JSON or binary format
            rule: Rule statement
            target-name: Resource name in APE resource name format
            target-type: Resource type(container/namespace)
            timeout: Timeout for an operation (default 15s)
            wallet: Path to the wallet or binary key

        Returns:
            Command`s result.
        """
        return self._execute(
            "control add-rule",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )

    def get_rule(
        self,
        endpoint: str,
        chain_id: str,
        target_name: str,
        target_type: str,
        chain_id_hex: Optional[bool] = None,
        wallet: Optional[str] = None,
        address: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> CommandResult:
        """Drop objects from the node's local storage

        Args:
            address string Address of wallet account
            chain-id string Chain id
            chain-id-hex Flag to parse chain ID as hex
            endpoint string Remote node control address (as 'multiaddr' or '<host>:<port>')
            target-name string Resource name in APE resource name format
            target-type string Resource type(container/namespace)
            timeout duration Timeout for an operation (default 15s)
            wallet string Path to the wallet or binary key

        Returns:
            Command`s result.
        """
        return self._execute(
            "control get-rule",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )

    def list_rules(
        self,
        endpoint: str,
        target_name: str,
        target_type: str,
        wallet: Optional[str] = None,
        address: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> CommandResult:
        """Drop objects from the node's local storage

        Args:
            address: Address of wallet account
            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
            target-name: Resource name in APE resource name format
            target-type: Resource type(container/namespace)
            timeout: Timeout for an operation (default 15s)
            wallet: Path to the wallet or binary key

        Returns:
            Command`s result.
        """
        return self._execute(
            "control list-rules",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )

    def list_targets(
        self,
        endpoint: str,
        chain_name: str,
        wallet: Optional[str] = None,
        address: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> CommandResult:
        """Drop objects from the node's local storage

        Args:
            address: Address of wallet account
            chain-name: Chain name(ingress|s3)
            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
            timeout: Timeout for an operation (default 15s)
            wallet: Path to the wallet or binary key

        Returns:
            Command`s result.
        """
        return self._execute(
            "control list-targets",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )

    def remove_rule(
        self,
        endpoint: str,
        chain_id: str,
        target_name: str,
        target_type: str,
        all: Optional[bool] = None,
        chain_id_hex: Optional[bool] = None,
        wallet: Optional[str] = None,
        address: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> CommandResult:
        """Drop objects from the node's local storage

        Args:
            address: Address of wallet account
            all: Remove all chains
            chain-id: Assign ID to the parsed chain
            chain-id-hex: Flag to parse chain ID as hex
            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
            target-name: Resource name in APE resource name format
            target-type: Resource type(container/namespace)
            timeout: Timeout for an operation (default 15s)
            wallet: Path to the wallet or binary key

        Returns:
            Command`s result.
        """
        return self._execute(
            "control remove-rule",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )
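Note that the control group talks to a node's control endpoint rather than the data RPC endpoint. A sketch of installing and then inspecting a local APE rule, continuing the facade example above; the ports, chain ID, and container ID are placeholders.

# Continuing the facade sketch above; the control port differs from the RPC port.
cid = "SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH"  # example container ID
cli.control.add_rule(
    endpoint="localhost:8090",
    chain_id="testChain",
    target_type="container",
    target_name=cid,
    rule=["allow Object.Get *"],
)
cli.control.list_rules(endpoint="localhost:8090", target_type="container", target_name=cid)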
@@ -8,7 +8,7 @@ class FrostfsCliNetmap(CliCommand):
     def epoch(
         self,
         rpc_endpoint: str,
-        wallet: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
         generate_key: bool = False,
         ttl: Optional[int] = None,
@@ -38,7 +38,7 @@ class FrostfsCliNetmap(CliCommand):
     def netinfo(
         self,
         rpc_endpoint: str,
-        wallet: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
         generate_key: bool = False,
         ttl: Optional[int] = None,
@@ -68,7 +68,7 @@ class FrostfsCliNetmap(CliCommand):
     def nodeinfo(
         self,
         rpc_endpoint: str,
-        wallet: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
         generate_key: bool = False,
         json: bool = False,
@@ -100,7 +100,7 @@ class FrostfsCliNetmap(CliCommand):
     def snapshot(
         self,
         rpc_endpoint: str,
-        wallet: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
         generate_key: bool = False,
         ttl: Optional[int] = None,
@@ -8,11 +8,12 @@ class FrostfsCliObject(CliCommand):
     def delete(
         self,
         rpc_endpoint: str,
-        wallet: str,
         cid: str,
         oid: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
+        generate_key: Optional[bool] = None,
         session: Optional[str] = None,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
@@ -25,6 +26,7 @@ class FrostfsCliObject(CliCommand):
             address: Address of wallet account.
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
+            generate_key: Generate new private key.
             oid: Object ID.
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
             session: Filepath to a JSON- or binary-encoded token of the object DELETE session.
@@ -44,11 +46,12 @@ class FrostfsCliObject(CliCommand):
     def get(
         self,
         rpc_endpoint: str,
-        wallet: str,
         cid: str,
         oid: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
+        generate_key: Optional[bool] = None,
         file: Optional[str] = None,
         header: Optional[str] = None,
         no_progress: bool = False,
@@ -66,6 +69,7 @@ class FrostfsCliObject(CliCommand):
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
             file: File to write object payload to. Default: stdout.
+            generate_key: Generate new private key.
             header: File to write header to. Default: stdout.
             no_progress: Do not show progress bar.
             oid: Object ID.
@@ -88,11 +92,12 @@ class FrostfsCliObject(CliCommand):
     def hash(
         self,
         rpc_endpoint: str,
-        wallet: str,
         cid: str,
         oid: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
+        generate_key: Optional[bool] = None,
         range: Optional[str] = None,
         salt: Optional[str] = None,
         ttl: Optional[int] = None,
@@ -108,6 +113,7 @@ class FrostfsCliObject(CliCommand):
             address: Address of wallet account.
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
+            generate_key: Generate new private key.
             oid: Object ID.
             range: Range to take hash from in the form offset1:length1,...
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
@@ -124,19 +130,18 @@ class FrostfsCliObject(CliCommand):
         """
         return self._execute(
             "object hash",
-            **{
-                param: value for param, value in locals().items() if param not in ["self", "params"]
-            },
+            **{param: value for param, value in locals().items() if param not in ["self", "params"]},
         )

     def head(
         self,
         rpc_endpoint: str,
-        wallet: str,
         cid: str,
         oid: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
+        generate_key: Optional[bool] = None,
         file: Optional[str] = None,
         json_mode: bool = False,
         main_only: bool = False,
@@ -155,6 +160,7 @@ class FrostfsCliObject(CliCommand):
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
             file: File to write object payload to. Default: stdout.
+            generate_key: Generate new private key.
             json_mode: Marshal output in JSON.
             main_only: Return only main fields.
             oid: Object ID.
@@ -178,13 +184,14 @@ class FrostfsCliObject(CliCommand):
     def lock(
         self,
         rpc_endpoint: str,
-        wallet: str,
         cid: str,
         oid: str,
+        wallet: Optional[str] = None,
         lifetime: Optional[int] = None,
         expire_at: Optional[int] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
+        generate_key: Optional[bool] = None,
         session: Optional[str] = None,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
@@ -197,6 +204,7 @@ class FrostfsCliObject(CliCommand):
             address: Address of wallet account.
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
+            generate_key: Generate new private key.
             oid: Object ID.
             lifetime: Lock lifetime.
             expire_at: Lock expiration epoch.
@@ -218,12 +226,14 @@ class FrostfsCliObject(CliCommand):
     def put(
         self,
         rpc_endpoint: str,
-        wallet: str,
         cid: str,
         file: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
         attributes: Optional[dict] = None,
         bearer: Optional[str] = None,
+        generate_key: Optional[bool] = None,
+        copies_number: Optional[int] = None,
         disable_filename: bool = False,
         disable_timestamp: bool = False,
         expire_at: Optional[int] = None,
@@ -241,11 +251,13 @@ class FrostfsCliObject(CliCommand):
             address: Address of wallet account.
             attributes: User attributes in form of Key1=Value1,Key2=Value2.
             bearer: File with signed JSON or binary encoded bearer token.
+            copies_number: Number of copies of the object to store within the RPC call.
             cid: Container ID.
             disable_filename: Do not set well-known filename attribute.
             disable_timestamp: Do not set well-known timestamp attribute.
             expire_at: Last epoch in the life of the object.
             file: File with object payload.
+            generate_key: Generate new private key.
             no_progress: Do not show progress bar.
             notify: Object notification in the form of *epoch*:*topic*; '-'
                 topic means using default.
@@ -267,12 +279,13 @@ class FrostfsCliObject(CliCommand):
     def range(
         self,
         rpc_endpoint: str,
-        wallet: str,
         cid: str,
         oid: str,
         range: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
+        generate_key: Optional[bool] = None,
         file: Optional[str] = None,
         json_mode: bool = False,
         raw: bool = False,
@@ -289,6 +302,7 @@ class FrostfsCliObject(CliCommand):
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
             file: File to write object payload to. Default: stdout.
+            generate_key: Generate new private key.
             json_mode: Marshal output in JSON.
             oid: Object ID.
             range: Range to take data from in the form offset:length.
@@ -311,10 +325,11 @@ class FrostfsCliObject(CliCommand):
     def search(
         self,
         rpc_endpoint: str,
-        wallet: str,
         cid: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
+        generate_key: Optional[bool] = None,
         filters: Optional[list] = None,
         oid: Optional[str] = None,
         phy: bool = False,
@@ -332,6 +347,7 @@ class FrostfsCliObject(CliCommand):
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
             filters: Repeated filter expressions or files with protobuf JSON.
+            generate_key: Generate new private key.
             oid: Object ID.
             phy: Search physically stored objects.
             root: Search for user objects.
@@ -349,3 +365,46 @@ class FrostfsCliObject(CliCommand):
             "object search",
             **{param: value for param, value in locals().items() if param not in ["self"]},
         )
+
+    def nodes(
+        self,
+        rpc_endpoint: str,
+        cid: str,
+        oid: Optional[str] = None,
+        wallet: Optional[str] = None,
+        address: Optional[str] = None,
+        bearer: Optional[str] = None,
+        generate_key: Optional[bool] = None,
+        trace: bool = False,
+        root: bool = False,
+        verify_presence_all: bool = False,
+        json: bool = False,
+        ttl: Optional[int] = None,
+        xhdr: Optional[dict] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """
+        Search object nodes.
+
+        Args:
+            address: Address of wallet account.
+            bearer: File with signed JSON or binary encoded bearer token.
+            cid: Container ID.
+            generate_key: Generate new private key.
+            oid: Object ID.
+            trace: Generate trace ID and print it.
+            root: Search for user objects.
+            rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
+            verify_presence_all: Verify the actual presence of the object on all netmap nodes.
+            ttl: TTL value in request meta header (default 2).
+            wallet: WIF (NEP-2) string or path to the wallet or binary key.
+            xhdr: Dict with request X-Headers.
+            timeout: Timeout for the operation (default 15s).
+
+        Returns:
+            Command's result.
+        """
+        return self._execute(
+            "object nodes",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
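The new object nodes wrapper complements container search_node: it reports which nodes actually hold a given object. A sketch against the facade above; the container and object IDs are placeholders.

# Continuing the facade sketch above.
result = cli.object.nodes(
    rpc_endpoint="localhost:8080",
    cid="SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH",  # example container ID
    oid="9fLJNgKcrNpMc1QwzU1esVubHNqw3DuhjCYQSBkSJpFo",  # example object ID
    verify_presence_all=True,
    json=True,
)
print(result.stdout)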
@@ -9,7 +9,6 @@ class FrostfsCliSession(CliCommand):
         self,
         rpc_endpoint: str,
         wallet: str,
-        wallet_password: str,
         out: str,
         lifetime: Optional[int] = None,
         address: Optional[str] = None,
@@ -30,12 +29,7 @@ class FrostfsCliSession(CliCommand):
         Returns:
             Command's result.
         """
-        return self._execute_with_password(
+        return self._execute(
             "session create",
-            wallet_password,
-            **{
-                param: value
-                for param, value in locals().items()
-                if param not in ["self", "wallet_password"]
-            },
+            **{param: value for param, value in locals().items() if param not in ["self"]},
         )
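Session token creation now goes through the plain _execute path and no longer accepts a wallet password. A sketch of the updated call shape against the facade above; the endpoint and file names are placeholders.

# Continuing the facade sketch above; wallet_password is no longer a parameter.
cli.session.create(
    rpc_endpoint="localhost:8080",
    wallet="wallet.json",
    out="session_token.json",
    lifetime=10,
)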
@@ -39,10 +39,10 @@ class FrostfsCliShards(CliCommand):
     def set_mode(
         self,
         endpoint: str,
-        wallet: str,
-        wallet_password: str,
         mode: str,
-        id: Optional[list[str]],
+        id: Optional[list[str]] = None,
+        wallet: Optional[str] = None,
+        wallet_password: Optional[str] = None,
         address: Optional[str] = None,
         all: bool = False,
         clear_errors: bool = False,
@@ -65,14 +65,15 @@ class FrostfsCliShards(CliCommand):
         Returns:
             Command's result.
         """
+        if not wallet_password:
+            return self._execute(
+                "control shards set-mode",
+                **{param: value for param, value in locals().items() if param not in ["self"]},
+            )
         return self._execute_with_password(
             "control shards set-mode",
             wallet_password,
-            **{
-                param: value
-                for param, value in locals().items()
-                if param not in ["self", "wallet_password"]
-            },
+            **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]},
         )

     def dump(
@@ -105,18 +106,14 @@ class FrostfsCliShards(CliCommand):
         return self._execute_with_password(
             "control shards dump",
             wallet_password,
-            **{
-                param: value
-                for param, value in locals().items()
-                if param not in ["self", "wallet_password"]
-            },
+            **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]},
         )

     def list(
         self,
         endpoint: str,
-        wallet: str,
-        wallet_password: str,
+        wallet: Optional[str] = None,
+        wallet_password: Optional[str] = None,
         address: Optional[str] = None,
         json_mode: bool = False,
         timeout: Optional[str] = None,
@@ -135,12 +132,130 @@ class FrostfsCliShards(CliCommand):
         Returns:
             Command's result.
         """
+        if not wallet_password:
+            return self._execute(
+                "control shards list",
+                **{param: value for param, value in locals().items() if param not in ["self"]},
+            )
         return self._execute_with_password(
             "control shards list",
             wallet_password,
-            **{
-                param: value
-                for param, value in locals().items()
-                if param not in ["self", "wallet_password"]
-            },
+            **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]},
         )
+
+    def evacuation_start(
+        self,
+        endpoint: str,
+        id: Optional[str] = None,
+        scope: Optional[str] = None,
+        all: bool = False,
+        no_errors: bool = True,
+        await_mode: bool = False,
+        address: Optional[str] = None,
+        timeout: Optional[str] = None,
+        no_progress: bool = False,
+    ) -> CommandResult:
+        """
+        Objects evacuation from shard to other shards.
+
+        Args:
+            address: Address of wallet account
+            all: Process all shards
+            await: Block execution until evacuation is completed
+            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
+            id: List of shard IDs in base58 encoding
+            no_errors: Skip invalid/unreadable objects (default true)
+            no_progress: Print progress if await provided
+            scope: Evacuation scope; possible values: trees, objects, all (default "all")
+            timeout: Timeout for an operation (default 15s)
+
+        Returns:
+            Command's result.
+        """
+        return self._execute(
+            "control shards evacuation start",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
+
+    def evacuation_reset(
+        self,
+        endpoint: str,
+        address: Optional[str] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """
+        Reset evacuate objects from shard to other shards status.
+
+        Args:
+            address: Address of wallet account
+            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
+            timeout: Timeout for an operation (default 15s)
+
+        Returns:
+            Command's result.
+        """
+        return self._execute(
+            "control shards evacuation reset",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
+
+    def evacuation_stop(
+        self,
+        endpoint: str,
+        address: Optional[str] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """
+        Stop running evacuate process from shard to other shards.
+
+        Args:
+            address: Address of wallet account
+            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
+            timeout: Timeout for an operation (default 15s)
+
+        Returns:
+            Command's result.
+        """
+        return self._execute(
+            "control shards evacuation stop",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
+
+    def evacuation_status(
+        self,
+        endpoint: str,
+        address: Optional[str] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """
+        Get evacuate objects from shard to other shards status.
+
+        Args:
+            address: Address of wallet account
+            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
+            timeout: Timeout for an operation (default 15s)
+
+        Returns:
+            Command's result.
+        """
+        return self._execute(
+            "control shards evacuation status",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
+
+    def detach(self, endpoint: str, address: Optional[str] = None, id: Optional[str] = None, timeout: Optional[str] = None):
+        """
+        Detach and close the shards
+
+        Args:
+            address: Address of wallet account
+            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
+            id: List of shard IDs in base58 encoding
+            timeout: Timeout for an operation (default 15s)
+
+        Returns:
+            Command's result.
+        """
+        return self._execute(
+            "control shards detach",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
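The new helpers map one-to-one onto the control shards evacuation subcommands. A round-trip sketch against the facade above; the shards attribute on the facade is assumed from the existing annotations, and the control port is a placeholder.

# Continuing the facade sketch above.
cli.shards.evacuation_start(endpoint="localhost:8090", all=True, await_mode=True, no_progress=True)
cli.shards.evacuation_status(endpoint="localhost:8090")
cli.shards.evacuation_stop(endpoint="localhost:8090")
cli.shards.evacuation_reset(endpoint="localhost:8090")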
53
src/frostfs_testlib/cli/frostfs_cli/tree.py
Normal file
53
src/frostfs_testlib/cli/frostfs_cli/tree.py
Normal file
|
@ -0,0 +1,53 @@
from typing import Optional

from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult


class FrostfsCliTree(CliCommand):
    def healthcheck(
        self,
        wallet: Optional[str] = None,
        rpc_endpoint: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> CommandResult:
        """Perform healthcheck of the tree service.

        Args:
            wallet: WIF (NEP-2) string or path to the wallet or binary key.
            rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
            timeout: Timeout for the operation (default 15s).

        Returns:
            Command's result.
        """
        return self._execute(
            "tree healthcheck",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )

    def list(
        self,
        cid: str,
        rpc_endpoint: Optional[str] = None,
        wallet: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> CommandResult:
        """Get the tree list for a container.

        Args:
            cid: Container ID.
            rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
            wallet: WIF (NEP-2) string or path to the wallet or binary key.
            timeout: Timeout for the operation (default 15s).

        Returns:
            Command's result.
        """
        return self._execute(
            "tree list",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )
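
A usage sketch for the new tree commands. The `tree` attribute on FrostfsCli is how BasicHealthcheck, later in this diff, reaches healthcheck; the shell and binary path here are placeholders:

from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.shell import LocalShell

cli = FrostfsCli(LocalShell(), "frostfs-cli", config_file="/tmp/wallet-config.yaml")  # paths are illustrative
healthcheck_result = cli.tree.healthcheck(rpc_endpoint="127.0.0.1:8080")
tree_list_result = cli.tree.list(cid="EXAMPLE_CID", rpc_endpoint="127.0.0.1:8080")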

@ -6,12 +6,12 @@ from frostfs_testlib.shell import CommandResult
 class FrostfsCliUtil(CliCommand):
     def sign_bearer_token(
         self,
-        wallet: str,
         from_file: str,
         to_file: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
         json: Optional[bool] = False,
     ) -> CommandResult:
         """
         Sign bearer token to use it in requests.
@ -33,9 +33,9 @@ class FrostfsCliUtil(CliCommand):
     def sign_session_token(
         self,
-        wallet: str,
         from_file: str,
         to_file: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
     ) -> CommandResult:
         """
@ -54,3 +54,11 @@ class FrostfsCliUtil(CliCommand):
             "util sign session-token",
             **{param: value for param, value in locals().items() if param not in ["self"]},
         )
+
+    def convert_eacl(self, from_file: str, to_file: str, json: Optional[bool] = False, ape: Optional[bool] = False):
+        """Convert representation of extended ACL table."""
+
+        return self._execute(
+            "util convert eacl",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
src/frostfs_testlib/cli/generic_cli.py (new file, 30 lines)
@ -0,0 +1,30 @@
from typing import Optional

from frostfs_testlib.hosting.interfaces import Host
from frostfs_testlib.shell.interfaces import CommandOptions, Shell


class GenericCli(object):
    def __init__(self, cli_name: str, host: Host) -> None:
        self.host = host
        self.cli_name = cli_name

    def __call__(
        self,
        args: Optional[str] = "",
        pipes: Optional[str] = "",
        shell: Optional[Shell] = None,
        options: Optional[CommandOptions] = None,
    ):
        if not shell:
            shell = self.host.get_shell()

        cli_config = self.host.get_cli_config(self.cli_name, True)
        extra_args = ""
        exec_path = self.cli_name
        if cli_config:
            extra_args = " ".join(cli_config.extra_args)
            exec_path = cli_config.exec_path

        cmd = f"{exec_path} {args} {extra_args} {pipes}"
        return shell.exec(cmd, options)
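
GenericCli lets tests wrap any binary on a host without a dedicated CliCommand subclass: when the host config declares the CLI, its exec_path and extra_args are used, otherwise the bare name is invoked. A sketch (the host object and URL are placeholders):

curl = GenericCli("curl", node.host)  # assumes a ClusterNode-like object named `node`
output = curl("-sS http://127.0.0.1:8080/", options=CommandOptions(check=False))
print(output.stdout)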
src/frostfs_testlib/cli/netmap_parser.py (new file, 91 lines)
@ -0,0 +1,91 @@
import re

from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo, NodeStatus


class NetmapParser:
    @staticmethod
    def netinfo(output: str) -> NodeNetInfo:
        regexes = {
            "epoch": r"Epoch: (?P<epoch>\d+)",
            "network_magic": r"Network magic: (?P<network_magic>.*$)",
            "time_per_block": r"Time per block: (?P<time_per_block>\d+\w+)",
            "container_fee": r"Container fee: (?P<container_fee>\d+)",
            "epoch_duration": r"Epoch duration: (?P<epoch_duration>\d+)",
            "inner_ring_candidate_fee": r"Inner Ring candidate fee: (?P<inner_ring_candidate_fee>\d+)",
            "maximum_object_size": r"Maximum object size: (?P<maximum_object_size>\d+)",
            "maximum_count_of_data_shards": r"Maximum count of data shards: (?P<maximum_count_of_data_shards>\d+)",
            "maximum_count_of_parity_shards": r"Maximum count of parity shards: (?P<maximum_count_of_parity_shards>\d+)",
            "withdrawal_fee": r"Withdrawal fee: (?P<withdrawal_fee>\d+)",
            "homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?P<homomorphic_hashing_disabled>true|false)",
            "maintenance_mode_allowed": r"Maintenance mode allowed: (?P<maintenance_mode_allowed>true|false)",
            "eigen_trust_alpha": r"EigenTrustAlpha: (?P<eigen_trust_alpha>\d+\w+$)",
            "eigen_trust_iterations": r"EigenTrustIterations: (?P<eigen_trust_iterations>\d+)",
        }
        parse_result = {}

        for key, regex in regexes.items():
            search_result = re.search(regex, output, flags=re.MULTILINE)
            if search_result is None:
                parse_result[key] = None
                continue
            parse_result[key] = search_result[key].strip()

        node_netinfo = NodeNetInfo(**parse_result)

        return node_netinfo

    @staticmethod
    def snapshot_all_nodes(output: str) -> list[NodeNetmapInfo]:
        """Parse each 'Node' block of the snapshot output and return each node as a dataclass."""
        netmap_nodes = output.split("Node ")[1:]
        dataclasses_netmap = []
        result_netmap = {}

        regexes = {
            "node_id": r"\d+: (?P<node_id>\w+)",
            "node_data_ips": r"(?P<node_data_ips>/ip4/.+?)$",
            "node_status": r"(?P<node_status>ONLINE|MAINTENANCE|OFFLINE)",
            "cluster_name": r"ClusterName: (?P<cluster_name>\w+)",
            "continent": r"Continent: (?P<continent>\w+)",
            "country": r"Country: (?P<country>\w+)",
            "country_code": r"CountryCode: (?P<country_code>\w+)",
            "external_address": r"ExternalAddr: (?P<external_address>/ip[4].+?)$",
            "location": r"Location: (?P<location>\w+.*)",
            "node": r"Node: (?P<node>\d+\.\d+\.\d+\.\d+)",
            "price": r"Price: (?P<price>\d+)",
            "sub_div": r"SubDiv: (?P<sub_div>.*)",
            "sub_div_code": r"SubDivCode: (?P<sub_div_code>\w+)",
            "un_locode": r"UN-LOCODE: (?P<un_locode>\w+.*)",
            "role": r"role: (?P<role>\w+)",
        }

        for node in netmap_nodes:
            for key, regex in regexes.items():
                search_result = re.search(regex, node, flags=re.MULTILINE)
                if search_result is None:
                    result_netmap[key] = None
                    continue
                if key == "node_data_ips":
                    result_netmap[key] = search_result[key].strip().split(" ")
                    continue
                if key == "external_address":
                    result_netmap[key] = search_result[key].strip().split(",")
                    continue
                if key == "node_status":
                    result_netmap[key] = NodeStatus(search_result[key].strip().lower())
                    continue
                result_netmap[key] = search_result[key].strip()

            dataclasses_netmap.append(NodeNetmapInfo(**result_netmap))

        return dataclasses_netmap

    @staticmethod
    def snapshot_one_node(output: str, cluster_node: ClusterNode) -> NodeNetmapInfo | None:
        snapshot_nodes = NetmapParser.snapshot_all_nodes(output=output)
        snapshot_node = [node for node in snapshot_nodes if node.node == cluster_node.host_ip]
        if not snapshot_node:
            return None
        return snapshot_node[0]
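
A sketch of feeding `netmap snapshot` output through the parser. The sample text below is hypothetical and heavily trimmed; real output carries more fields per node:

sample = (
    "Node 1: 022f9daa ONLINE /ip4/10.0.0.5/tcp/8080\n"
    "    Price: 10\n"
    "    UN-LOCODE: DE FRA\n"
    "    Node: 10.0.0.5\n"
)
nodes = NetmapParser.snapshot_all_nodes(sample)
print(nodes[0].node, nodes[0].node_status)  # fields missing from the sample come back as None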

@ -1,207 +0,0 @@
import frostfs_testlib.resources.optionals as optionals
from frostfs_testlib.load.k6 import K6
from frostfs_testlib.load.load_config import (
    EndpointSelectionStrategy,
    K6ProcessAllocationStrategy,
    LoadParams,
    LoadScenario,
    LoadType,
)
from frostfs_testlib.load.load_steps import init_s3_client, prepare_k6_instances
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.load_params import (
    K6_TEARDOWN_PERIOD,
    LOAD_NODE_SSH_PASSWORD,
    LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE,
    LOAD_NODE_SSH_PRIVATE_KEY_PATH,
    LOAD_NODE_SSH_USER,
    LOAD_NODES,
)
from frostfs_testlib.shell.interfaces import SshCredentials
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.cluster.frostfs_services import S3Gate, StorageNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.test_control import run_optionally

reporter = get_reporter()


class BackgroundLoadController:
    k6_instances: list[K6]
    k6_dir: str
    load_params: LoadParams
    load_nodes: list[str]
    verification_params: LoadParams
    nodes_under_load: list[ClusterNode]
    ssh_credentials: SshCredentials
    loaders_wallet: WalletInfo
    endpoints: list[str]

    def __init__(
        self,
        k6_dir: str,
        load_params: LoadParams,
        loaders_wallet: WalletInfo,
        nodes_under_load: list[ClusterNode],
    ) -> None:
        self.k6_dir = k6_dir
        self.load_params = load_params
        self.nodes_under_load = nodes_under_load
        self.load_nodes = LOAD_NODES
        self.loaders_wallet = loaders_wallet

        if load_params.endpoint_selection_strategy is None:
            raise RuntimeError("endpoint_selection_strategy should not be None")

        self.endpoints = self._get_endpoints(
            load_params.load_type, load_params.endpoint_selection_strategy
        )
        self.verification_params = LoadParams(
            clients=load_params.readers,
            scenario=LoadScenario.VERIFY,
            registry_file=load_params.registry_file,
            verify_time=load_params.verify_time,
            load_type=load_params.load_type,
            load_id=load_params.load_id,
            working_dir=load_params.working_dir,
            endpoint_selection_strategy=load_params.endpoint_selection_strategy,
            k6_process_allocation_strategy=load_params.k6_process_allocation_strategy,
        )
        self.ssh_credentials = SshCredentials(
            LOAD_NODE_SSH_USER,
            LOAD_NODE_SSH_PASSWORD,
            LOAD_NODE_SSH_PRIVATE_KEY_PATH,
            LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE,
        )

    @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, [])
    def _get_endpoints(
        self, load_type: LoadType, endpoint_selection_strategy: EndpointSelectionStrategy
    ):
        all_endpoints = {
            LoadType.gRPC: {
                EndpointSelectionStrategy.ALL: list(
                    set(
                        endpoint
                        for node_under_load in self.nodes_under_load
                        for endpoint in node_under_load.service(StorageNode).get_all_rpc_endpoint()
                    )
                ),
                EndpointSelectionStrategy.FIRST: list(
                    set(
                        node_under_load.service(StorageNode).get_rpc_endpoint()
                        for node_under_load in self.nodes_under_load
                    )
                ),
            },
            # for some reason xk6 appends http protocol on its own
            LoadType.S3: {
                EndpointSelectionStrategy.ALL: list(
                    set(
                        endpoint.replace("http://", "")
                        for node_under_load in self.nodes_under_load
                        for endpoint in node_under_load.service(S3Gate).get_all_endpoints()
                    )
                ),
                EndpointSelectionStrategy.FIRST: list(
                    set(
                        node_under_load.service(S3Gate).get_endpoint().replace("http://", "")
                        for node_under_load in self.nodes_under_load
                    )
                ),
            },
        }

        return all_endpoints[load_type][endpoint_selection_strategy]

    @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
    @reporter.step_deco("Prepare background load instances")
    def prepare(self):
        if self.load_params.load_type == LoadType.S3:
            init_s3_client(
                self.load_nodes,
                self.load_params,
                self.k6_dir,
                self.ssh_credentials,
                self.nodes_under_load,
                self.loaders_wallet,
            )

        self._prepare(self.load_params)

    def _prepare(self, load_params: LoadParams):
        self.k6_instances = prepare_k6_instances(
            load_nodes=LOAD_NODES,
            ssh_credentials=self.ssh_credentials,
            k6_dir=self.k6_dir,
            load_params=load_params,
            endpoints=self.endpoints,
            loaders_wallet=self.loaders_wallet,
        )

    @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
    @reporter.step_deco("Start background load")
    def start(self):
        if self.load_params.preset is None:
            raise RuntimeError("Preset should not be none at the moment of start")

        with reporter.step(
            f"Start background load on nodes {self.nodes_under_load}: "
            f"writers = {self.load_params.writers}, "
            f"obj_size = {self.load_params.object_size}, "
            f"load_time = {self.load_params.load_time}, "
            f"prepare_json = {self.load_params.preset.pregen_json}, "
            f"endpoints = {self.endpoints}"
        ):
            for k6_load_instance in self.k6_instances:
                k6_load_instance.start()

    @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
    @reporter.step_deco("Stop background load")
    def stop(self):
        for k6_load_instance in self.k6_instances:
            k6_load_instance.stop()

    @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, True)
    def is_running(self):
        for k6_load_instance in self.k6_instances:
            if not k6_load_instance.is_running:
                return False

        return True

    def wait_until_finish(self):
        if self.load_params.load_time is None:
            raise RuntimeError("LoadTime should not be none")

        for k6_instance in self.k6_instances:
            k6_instance.wait_until_finished(self.load_params.load_time + int(K6_TEARDOWN_PERIOD))

    @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
    def verify(self):
        if self.verification_params.verify_time is None:
            raise RuntimeError("verify_time should not be none")

        self._prepare(self.verification_params)
        with reporter.step("Run verify background load data"):
            for k6_verify_instance in self.k6_instances:
                k6_verify_instance.start()
                k6_verify_instance.wait_until_finished(self.verification_params.verify_time)

    @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
    @reporter.step_deco("K6 run results")
    def get_results(self) -> dict:
        results = {}
        for k6_instance in self.k6_instances:
            if k6_instance.load_params.k6_process_allocation_strategy is None:
                raise RuntimeError("k6_process_allocation_strategy should not be none")

            result = k6_instance.get_results()
            keys_map = {
                K6ProcessAllocationStrategy.PER_LOAD_NODE: k6_instance.load_node,
                K6ProcessAllocationStrategy.PER_ENDPOINT: k6_instance.endpoints[0],
            }
            key = keys_map[k6_instance.load_params.k6_process_allocation_strategy]
            results[key] = result

        return results

@ -1,130 +0,0 @@
import time

import allure

import frostfs_testlib.resources.optionals as optionals
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.shell import CommandOptions, Shell
from frostfs_testlib.steps import epoch
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode
from frostfs_testlib.storage.controllers.disk_controller import DiskController
from frostfs_testlib.testing.test_control import run_optionally, wait_for_success
from frostfs_testlib.utils.failover_utils import (
    wait_all_storage_nodes_returned,
    wait_for_host_offline,
    wait_for_host_online,
    wait_for_node_online,
)

reporter = get_reporter()


class ClusterStateController:
    def __init__(self, shell: Shell, cluster: Cluster) -> None:
        self.stopped_nodes: list[ClusterNode] = []
        self.detached_disks: dict[str, DiskController] = {}
        self.stopped_storage_nodes: list[StorageNode] = []
        self.cluster = cluster
        self.shell = shell

    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
    @reporter.step_deco("Stop host of node {node}")
    def stop_node_host(self, node: ClusterNode, mode: str):
        with allure.step(f"Stop host {node.host.config.address}"):
            node.host.stop_host(mode=mode)
            wait_for_host_offline(self.shell, node.storage_node)
            self.stopped_nodes.append(node)

    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
    @reporter.step_deco("Start host of node {node}")
    def start_node_host(self, node: ClusterNode):
        with allure.step(f"Start host {node.host.config.address}"):
            node.host.start_host()
            wait_for_host_online(self.shell, node.storage_node)
            wait_for_node_online(node.storage_node)
            self.stopped_nodes.remove(node)

    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
    @reporter.step_deco("Start stopped hosts")
    def start_stopped_hosts(self):
        for node in self.stopped_nodes:
            node.host.start_host()
        self.stopped_nodes = []
        wait_all_storage_nodes_returned(self.shell, self.cluster)

    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
    @reporter.step_deco("Detach disk {device} at {mountpoint} on node {node}")
    def detach_disk(self, node: StorageNode, device: str, mountpoint: str):
        disk_controller = self._get_disk_controller(node, device, mountpoint)
        self.detached_disks[disk_controller.id] = disk_controller
        disk_controller.detach()

    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
    @reporter.step_deco("Attach disk {device} at {mountpoint} on node {node}")
    def attach_disk(self, node: StorageNode, device: str, mountpoint: str):
        disk_controller = self._get_disk_controller(node, device, mountpoint)
        disk_controller.attach()
        self.detached_disks.pop(disk_controller.id, None)

    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
    @reporter.step_deco("Restore detached disks")
    def restore_disks(self):
        for disk_controller in self.detached_disks.values():
            disk_controller.attach()
        self.detached_disks = {}

    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
    @reporter.step_deco("Stop storage service on {node}")
    def stop_storage_service(self, node: ClusterNode):
        node.storage_node.stop_service()
        self.stopped_storage_nodes.append(node.storage_node)

    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
    @reporter.step_deco("Start storage service on {node}")
    def start_storage_service(self, node: ClusterNode):
        node.storage_node.start_service()
        self.stopped_storage_nodes.remove(node.storage_node)

    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
    @reporter.step_deco("Start stopped storage services")
    def start_stopped_storage_services(self):
        for node in self.stopped_storage_nodes:
            node.start_service()
        self.stopped_storage_nodes = []

    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
    @reporter.step_deco("Hard reboot host {node} via magic SysRq option")
    def panic_reboot_host(self, node: ClusterNode):
        shell = node.host.get_shell()
        shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"')

        options = CommandOptions(close_stdin=True, timeout=1, check=False)
        shell.exec('sudo sh -c "echo b > /proc/sysrq-trigger"', options)

        # Let things settle down; a short wait here prevents SSH from
        # getting stuck during the panic.
        time.sleep(10)
        wait_for_host_online(self.shell, node.storage_node)
        wait_for_node_online(node.storage_node)

    @reporter.step_deco("Wait up to {timeout} seconds for nodes on cluster to align epochs")
    def wait_for_epochs_align(self, timeout=60):
        @wait_for_success(timeout, 5, None, True)
        def check_epochs():
            epochs_by_node = epoch.get_epochs_from_nodes(self.shell, self.cluster)
            assert (
                len(set(epochs_by_node.values())) == 1
            ), f"unaligned epochs found: {epochs_by_node}"

        check_epochs()

    def _get_disk_controller(
        self, node: StorageNode, device: str, mountpoint: str
    ) -> DiskController:
        disk_controller_id = DiskController.get_id(node, device)
        if disk_controller_id in self.detached_disks.keys():
            disk_controller = self.detached_disks[disk_controller_id]
        else:
            disk_controller = DiskController(node, device, mountpoint)

        return disk_controller
src/frostfs_testlib/credentials/authmate_s3_provider.py (new file, 47 lines)
@ -0,0 +1,47 @@
import re
from typing import Optional

from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsAuthmate
from frostfs_testlib.credentials.interfaces import S3Credentials, S3CredentialsProvider, User
from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
from frostfs_testlib.shell import LocalShell
from frostfs_testlib.steps.cli.container import list_containers
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
from frostfs_testlib.utils import string_utils


class AuthmateS3CredentialsProvider(S3CredentialsProvider):
    @reporter.step("Init S3 Credentials using Authmate CLI")
    def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None) -> S3Credentials:
        cluster_nodes: list[ClusterNode] = self.cluster.cluster_nodes
        shell = LocalShell()
        wallet = user.wallet
        endpoint = cluster_node.storage_node.get_rpc_endpoint()

        gate_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes]
        # Unique short bucket name
        bucket = string_utils.unique_name("bucket-")

        frostfs_authmate: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
        issue_secret_output = frostfs_authmate.secret.issue(
            wallet=wallet.path,
            peer=endpoint,
            gate_public_key=gate_public_keys,
            wallet_password=wallet.password,
            container_policy=location_constraints,
            container_friendly_name=bucket,
        ).stdout

        aws_access_key_id = str(re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id"))
        aws_secret_access_key = str(
            re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output).group("aws_secret_access_key")
        )
        cid = str(re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group("container_id"))

        containers_list = list_containers(wallet, shell, endpoint)
        assert cid in containers_list, f"Expected cid {cid} in {containers_list}"

        user.s3_credentials = S3Credentials(aws_access_key_id, aws_secret_access_key)
        return user.s3_credentials
src/frostfs_testlib/credentials/interfaces.py (new file, 51 lines)
@ -0,0 +1,51 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Any, Optional

from frostfs_testlib.plugins import load_plugin
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo


@dataclass
class S3Credentials:
    access_key: str
    secret_key: str


@dataclass
class User:
    name: str
    attributes: dict[str, Any] = field(default_factory=dict)
    wallet: WalletInfo | None = None
    s3_credentials: S3Credentials | None = None


class S3CredentialsProvider(ABC):
    def __init__(self, cluster: Cluster) -> None:
        self.cluster = cluster

    @abstractmethod
    def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None, **kwargs) -> S3Credentials:
        raise NotImplementedError("Directly called abstract class?")


class GrpcCredentialsProvider(ABC):
    def __init__(self, cluster: Cluster) -> None:
        self.cluster = cluster

    @abstractmethod
    def provide(self, user: User, cluster_node: ClusterNode, **kwargs) -> WalletInfo:
        raise NotImplementedError("Directly called abstract class?")


class CredentialsProvider(object):
    S3: S3CredentialsProvider
    GRPC: GrpcCredentialsProvider

    def __init__(self, cluster: Cluster) -> None:
        config = cluster.cluster_nodes[0].host.config
        s3_cls = load_plugin("frostfs.testlib.credentials_providers", config.s3_creds_plugin_name)
        self.S3 = s3_cls(cluster)
        grpc_cls = load_plugin("frostfs.testlib.credentials_providers", config.grpc_creds_plugin_name)
        self.GRPC = grpc_cls(cluster)
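
The provider pair is resolved through entry-point plugins, so a deployment can swap credential backends via host config alone. A sketch, assuming a configured Cluster whose host config names the default plugins (authmate and wallet_factory, per the defaults added to HostConfig later in this diff):

provider = CredentialsProvider(cluster)
user = User(name="test-user")
provider.GRPC.provide(user, cluster.cluster_nodes[0])  # fills user.wallet
provider.S3.provide(user, cluster.cluster_nodes[0])    # fills user.s3_credentials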
src/frostfs_testlib/credentials/wallet_factory_provider.py (new file, 14 lines)
@ -0,0 +1,14 @@
from frostfs_testlib import reporter
from frostfs_testlib.credentials.interfaces import GrpcCredentialsProvider, User
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_PASS
from frostfs_testlib.shell.local_shell import LocalShell
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.wallet import WalletFactory, WalletInfo


class WalletFactoryProvider(GrpcCredentialsProvider):
    @reporter.step("Init gRPC Credentials using wallet generation")
    def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo:
        wallet_factory = WalletFactory(ASSETS_DIR, LocalShell())
        user.wallet = wallet_factory.create_wallet(file_name=user.name, password=DEFAULT_WALLET_PASS)
        return user.wallet

@ -1,5 +1,5 @@
 class Options:
-    DEFAULT_SHELL_TIMEOUT = 90
+    DEFAULT_SHELL_TIMEOUT = 120

     @staticmethod
     def get_default_shell_timeout():
src/frostfs_testlib/fixtures.py (new file, 45 lines)
@ -0,0 +1,45 @@
import logging
import os
from importlib.metadata import entry_points

import pytest
import yaml

from frostfs_testlib import reporter
from frostfs_testlib.hosting.hosting import Hosting
from frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE
from frostfs_testlib.storage import get_service_registry


@pytest.fixture(scope="session")
def configure_testlib():
    reporter.get_reporter().register_handler(reporter.AllureHandler())
    reporter.get_reporter().register_handler(reporter.StepsLogger())
    logging.getLogger("paramiko").setLevel(logging.INFO)

    # Register services for cluster
    registry = get_service_registry()
    services = entry_points(group="frostfs.testlib.services")
    for svc in services:
        registry.register_service(svc.name, svc.load())


@pytest.fixture(scope="session")
def temp_directory(configure_testlib):
    with reporter.step("Prepare tmp directory"):
        full_path = ASSETS_DIR
        if not os.path.exists(full_path):
            os.mkdir(full_path)

    return full_path


@pytest.fixture(scope="session")
def hosting(configure_testlib) -> Hosting:
    with open(HOSTING_CONFIG_FILE, "r") as file:
        hosting_config = yaml.full_load(file)

    hosting_instance = Hosting()
    hosting_instance.configure(hosting_config)

    return hosting_instance
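
A sketch of consuming these session fixtures from a test suite; registering the module as a pytest plugin in conftest.py is one plausible wiring, not prescribed by this diff:

# conftest.py
pytest_plugins = ["frostfs_testlib.fixtures"]

# test_example.py
def test_hosting_is_configured(hosting):
    # `hosting` is built from HOSTING_CONFIG_FILE by the session fixture above
    assert hosting is not None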
src/frostfs_testlib/healthcheck/basic_healthcheck.py (new file, 109 lines)
@ -0,0 +1,109 @@
from typing import Callable

from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.healthcheck.interfaces import Healthcheck
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
from frostfs_testlib.shell import CommandOptions
from frostfs_testlib.steps.node_management import storage_node_healthcheck
from frostfs_testlib.storage.cluster import ClusterNode, ServiceClass
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.failover_utils import check_services_status


class BasicHealthcheck(Healthcheck):
    def _perform(self, cluster_node: ClusterNode, checks: dict[Callable, dict]):
        issues: list[str] = []
        for check, kwargs in checks.items():
            issue = check(cluster_node, **kwargs)
            if issue:
                issues.append(issue)

        assert not issues, "Issues found:\n" + "\n".join(issues)

    @wait_for_success(900, 30, title="Wait for full healthcheck for {cluster_node}")
    def full_healthcheck(self, cluster_node: ClusterNode):
        checks = {
            self.storage_healthcheck: {},
            self._tree_healthcheck: {},
        }

        self._perform(cluster_node, checks)

    @wait_for_success(900, 30, title="Wait for startup healthcheck on {cluster_node}")
    def startup_healthcheck(self, cluster_node: ClusterNode):
        checks = {
            self.storage_healthcheck: {},
            self._tree_healthcheck: {},
        }

        self._perform(cluster_node, checks)

    @wait_for_success(900, 30, title="Wait for storage healthcheck on {cluster_node}")
    def storage_healthcheck(self, cluster_node: ClusterNode) -> str | None:
        checks = {
            self._storage_healthcheck: {},
        }

        self._perform(cluster_node, checks)

    @wait_for_success(900, 30, title="Wait for tree healthcheck on {cluster_node}")
    def tree_healthcheck(self, cluster_node: ClusterNode) -> str | None:
        checks = {
            self._tree_healthcheck: {},
        }

        self._perform(cluster_node, checks)

    @wait_for_success(120, 5, title="Wait for service healthcheck on {cluster_node}")
    def services_healthcheck(self, cluster_node: ClusterNode):
        svcs_to_check = cluster_node.services
        checks = {
            check_services_status: {
                "service_list": svcs_to_check,
                "expected_status": "active",
            },
            self._check_services: {"services": svcs_to_check},
        }

        self._perform(cluster_node, checks)

    def _check_services(self, cluster_node: ClusterNode, services: list[ServiceClass]):
        for svc in services:
            result = svc.service_healthcheck()
            if result is False:
                return f"Service {svc.get_service_systemctl_name()} healthcheck failed on node {cluster_node}."

    @reporter.step("Storage healthcheck on {cluster_node}")
    def _storage_healthcheck(self, cluster_node: ClusterNode) -> str | None:
        result = storage_node_healthcheck(cluster_node.storage_node)
        self._gather_socket_info(cluster_node)
        if result.health_status != "READY" or result.network_status != "ONLINE":
            return f"Node {cluster_node} is not healthy. Health={result.health_status}. Network={result.network_status}"

    @reporter.step("Tree healthcheck on {cluster_node}")
    def _tree_healthcheck(self, cluster_node: ClusterNode) -> str | None:
        host = cluster_node.host
        service_config = host.get_service_config(cluster_node.storage_node.name)
        wallet_path = service_config.attributes["wallet_path"]
        wallet_password = service_config.attributes["wallet_password"]

        shell = host.get_shell()
        wallet_config_path = f"/tmp/{cluster_node.storage_node.name}-config.yaml"
        wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
        shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")

        remote_cli = FrostfsCli(
            shell,
            host.get_cli_config(FROSTFS_CLI_EXEC).exec_path,
            config_file=wallet_config_path,
        )
        result = remote_cli.tree.healthcheck(rpc_endpoint="127.0.0.1:8080")
        if result.return_code != 0:
            return (
                f"Error during tree healthcheck (rc={result.return_code}): {result.stdout}. \n Stderr: {result.stderr}"
            )

    @reporter.step("Gather socket info for {cluster_node}")
    def _gather_socket_info(self, cluster_node: ClusterNode):
        cluster_node.host.get_shell().exec("ss -tuln | grep 8080", CommandOptions(check=False))
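
A sketch of driving the healthcheck from a test. BasicHealthcheck defines no constructor of its own, so instantiation takes no arguments here; `cluster_node` is a placeholder:

healthcheck = BasicHealthcheck()
healthcheck.full_healthcheck(cluster_node)      # retries up to 900s via wait_for_success
healthcheck.services_healthcheck(cluster_node)  # systemd status plus per-service healthcheck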
src/frostfs_testlib/healthcheck/interfaces.py (new file, 25 lines)
@ -0,0 +1,25 @@
from abc import ABC, abstractmethod

from frostfs_testlib.storage.cluster import ClusterNode


class Healthcheck(ABC):
    @abstractmethod
    def full_healthcheck(self, cluster_node: ClusterNode):
        """Perform full healthcheck on the target cluster node"""

    @abstractmethod
    def startup_healthcheck(self, cluster_node: ClusterNode):
        """Perform healthcheck required on startup of target cluster node"""

    @abstractmethod
    def storage_healthcheck(self, cluster_node: ClusterNode):
        """Perform storage service healthcheck on target cluster node"""

    @abstractmethod
    def services_healthcheck(self, cluster_node: ClusterNode):
        """Perform service status check on target cluster node"""

    @abstractmethod
    def tree_healthcheck(self, cluster_node: ClusterNode):
        """Perform tree healthcheck on target cluster node"""
src/frostfs_testlib/hooks.py (new file, 13 lines)
@ -0,0 +1,13 @@
import pytest


@pytest.hookimpl
def pytest_collection_modifyitems(items: list[pytest.Item]):
    # All tests whose nodeid (the full path of the test) contains "frostfs"
    # are granted the `frostfs` marker, excluding:
    # 1. plugins
    # 2. testlib itself
    for item in items:
        location = item.location[0]
        if "frostfs" in location and "plugin" not in location and "testlib" not in location:
            item.add_marker("frostfs")
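
A sketch of enabling the hook in a consuming test suite; importing the hook into conftest.py places it in the module namespace where pytest discovers it (one possible wiring, not prescribed by this diff):

# conftest.py
from frostfs_testlib.hooks import pytest_collection_modifyitems  # noqa: F401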

@ -10,9 +10,7 @@ class ParsedAttributes:
     def parse(cls, attributes: dict[str, Any]):
         # Pick attributes supported by the class
         field_names = set(field.name for field in fields(cls))
-        supported_attributes = {
-            key: value for key, value in attributes.items() if key in field_names
-        }
+        supported_attributes = {key: value for key, value in attributes.items() if key in field_names}
         return cls(**supported_attributes)


@ -29,6 +27,7 @@ class CLIConfig:
     name: str
     exec_path: str
     attributes: dict[str, str] = field(default_factory=dict)
+    extra_args: list[str] = field(default_factory=list)


 @dataclass
@ -52,6 +51,7 @@ class HostConfig:

     Attributes:
         plugin_name: Name of plugin that should be used to manage the host.
+        healthcheck_plugin_name: Name of the plugin for healthcheck operations.
         address: Address of the machine (IP or DNS name).
         services: List of services hosted on the machine.
         clis: List of CLI tools available on the machine.
@ -60,10 +60,17 @@ class HostConfig:
     """

     plugin_name: str
+    hostname: str
+    healthcheck_plugin_name: str
     address: str
+    s3_creds_plugin_name: str = field(default="authmate")
+    grpc_creds_plugin_name: str = field(default="wallet_factory")
+    product: str = field(default="frostfs")
     services: list[ServiceConfig] = field(default_factory=list)
     clis: list[CLIConfig] = field(default_factory=list)
     attributes: dict[str, str] = field(default_factory=dict)
+    interfaces: dict[str, str] = field(default_factory=dict)
+    environment: dict[str, str] = field(default_factory=dict)

     def __post_init__(self) -> None:
         self.services = [ServiceConfig(**service) for service in self.services or []]
@ -11,7 +11,7 @@ import docker
 from requests import HTTPError

 from frostfs_testlib.hosting.config import ParsedAttributes
-from frostfs_testlib.hosting.interfaces import DiskInfo, Host
+from frostfs_testlib.hosting.interfaces import DiskInfo, Host, HostStatus
 from frostfs_testlib.shell import LocalShell, Shell, SSHShell
 from frostfs_testlib.shell.command_inspectors import SudoInspector

@ -61,10 +61,10 @@ class ServiceAttributes(ParsedAttributes):
 class DockerHost(Host):
     """Manages services hosted in Docker containers running on a local or remote machine."""

-    def get_shell(self) -> Shell:
+    def get_shell(self, sudo: bool = False) -> Shell:
         host_attributes = HostAttributes.parse(self._config.attributes)
         command_inspectors = []
-        if host_attributes.sudo_shell:
+        if sudo:
             command_inspectors.append(SudoInspector())

         if not host_attributes.ssh_login:
@ -87,6 +87,15 @@
         for service_config in self._config.services:
             self.start_service(service_config.name)

+    def get_host_status(self) -> HostStatus:
+        # We emulate host status by checking all services.
+        for service_config in self._config.services:
+            state = self._get_container_state(service_config.name)
+            if state != "running":
+                return HostStatus.OFFLINE
+
+        return HostStatus.ONLINE
+
     def stop_host(self) -> None:
         # We emulate stopping machine by stopping all services
         # As an alternative we can probably try to stop docker service...
@ -117,6 +126,20 @@
             timeout=service_attributes.stop_timeout,
         )

+    def mask_service(self, service_name: str) -> None:
+        # Not required for Docker
+        return
+
+    def unmask_service(self, service_name: str) -> None:
+        # Not required for Docker
+        return
+
+    def wait_success_suspend_process(self, service_name: str):
+        raise NotImplementedError("Not supported for docker")
+
+    def wait_success_resume_process(self, service_name: str):
+        raise NotImplementedError("Not supported for docker")
+
     def restart_service(self, service_name: str) -> None:
         service_attributes = self._get_service_attributes(service_name)

@ -129,13 +152,51 @@
             timeout=service_attributes.start_timeout,
         )

-    def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None:
+    def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None:
+        raise NotImplementedError("Not implemented for docker")
+
+    def get_data_directory(self, service_name: str) -> str:
         service_attributes = self._get_service_attributes(service_name)

         client = self._get_docker_client()
         volume_info = client.inspect_volume(service_attributes.volume_name)
         volume_path = volume_info["Mountpoint"]

+        return volume_path
+
+    def send_signal_to_service(self, service_name: str, signal: str) -> None:
+        raise NotImplementedError("Not implemented for docker")
+
+    def delete_metabase(self, service_name: str) -> None:
+        raise NotImplementedError("Not implemented for docker")
+
+    def delete_write_cache(self, service_name: str) -> None:
+        raise NotImplementedError("Not implemented for docker")
+
+    def delete_fstree(self, service_name: str) -> None:
+        raise NotImplementedError("Not implemented for docker")
+
+    def delete_blobovnicza(self, service_name: str) -> None:
+        raise NotImplementedError("Not implemented for docker")
+
+    def delete_pilorama(self, service_name: str) -> None:
+        raise NotImplementedError("Not implemented for docker")
+
+    def delete_file(self, file_path: str) -> None:
+        raise NotImplementedError("Not implemented for docker")
+
+    def is_file_exist(self, file_path: str) -> None:
+        raise NotImplementedError("Not implemented for docker")
+
+    def wipefs_storage_node_data(self, service_name: str) -> None:
+        raise NotImplementedError("Not implemented for docker")
+
+    def finish_wipefs(self, service_name: str) -> None:
+        raise NotImplementedError("Not implemented for docker")
+
+    def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None:
+        volume_path = self.get_data_directory(service_name)
+
         shell = self.get_shell()
         meta_clean_cmd = f"rm -rf {volume_path}/meta*/*"
         data_clean_cmd = f"; rm -rf {volume_path}/data*/*" if not cache_only else ""
@ -181,11 +242,41 @@
         with open(file_path, "wb") as file:
             file.write(logs)

+    def get_filtered_logs(
+        self,
+        filter_regex: str,
+        since: Optional[datetime] = None,
+        until: Optional[datetime] = None,
+        unit: Optional[str] = None,
+        exclude_filter: Optional[str] = None,
+        priority: Optional[str] = None,
+    ) -> str:
+        client = self._get_docker_client()
+        filtered_logs = ""
+        for service_config in self._config.services:
+            container_name = self._get_service_attributes(service_config.name).container_name
+            try:
+                filtered_logs = client.logs(container_name, since=since, until=until)
+            except HTTPError as exc:
+                logger.info(f"Got exception while dumping logs of '{container_name}': {exc}")
+                continue
+
+            if exclude_filter:
+                filtered_logs = filtered_logs.replace(exclude_filter, "")
+            matches = re.findall(filter_regex, filtered_logs, re.IGNORECASE + re.MULTILINE)
+            found = list(matches)
+
+            if found:
+                filtered_logs += f"{container_name}:\n{os.linesep.join(found)}"
+
+        return filtered_logs
+
     def is_message_in_logs(
         self,
         message_regex: str,
         since: Optional[datetime] = None,
         until: Optional[datetime] = None,
+        unit: Optional[str] = None,
     ) -> bool:
         client = self._get_docker_client()
         for service_config in self._config.services:
@ -228,20 +319,23 @@
             return container
         return None

-    def _wait_for_container_to_be_in_state(
-        self, container_name: str, expected_state: str, timeout: int
-    ) -> None:
+    def _wait_for_container_to_be_in_state(self, container_name: str, expected_state: str, timeout: int) -> None:
         iterations = 10
         iteration_wait_time = timeout / iterations

         # To speed things up, we break timeout in smaller iterations and check container state
         # several times. This way waiting stops as soon as container reaches the expected state
         for _ in range(iterations):
-            container = self._get_container_by_name(container_name)
-            logger.debug(f"Current container state\n:{json.dumps(container, indent=2)}")
+            state = self._get_container_state(container_name)

-            if container and container["State"] == expected_state:
+            if state == expected_state:
                 return
             time.sleep(iteration_wait_time)

         raise RuntimeError(f"Container {container_name} is not in {expected_state} state.")

+    def _get_container_state(self, container_name: str) -> str:
+        container = self._get_container_by_name(container_name)
+        logger.debug(f"Current container state\n:{json.dumps(container, indent=2)}")
+
+        return container.get("State", None)
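
A sketch of the new log filtering on a Docker host (datetime bounds are optional; `docker_host` is a placeholder for a DockerHost instance):

from datetime import datetime, timedelta

errors = docker_host.get_filtered_logs(
    filter_regex=r"panic|error",
    since=datetime.utcnow() - timedelta(minutes=10),  # only look at recent log lines
)
print(errors)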
@ -4,6 +4,14 @@ from typing import Optional
|
||||||
|
|
||||||
from frostfs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig
|
from frostfs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig
|
||||||
from frostfs_testlib.shell.interfaces import Shell
|
from frostfs_testlib.shell.interfaces import Shell
|
||||||
|
from frostfs_testlib.testing.readable import HumanReadableEnum
|
||||||
|
from frostfs_testlib.testing.test_control import retry
|
||||||
|
|
||||||
|
|
||||||
|
class HostStatus(HumanReadableEnum):
|
||||||
|
ONLINE = "Online"
|
||||||
|
OFFLINE = "Offline"
|
||||||
|
UNKNOWN = "Unknown"
|
||||||
|
|
||||||
|
|
||||||
class DiskInfo(dict):
|
class DiskInfo(dict):
|
||||||
|
@ -18,9 +26,7 @@ class Host(ABC):
|
||||||
|
|
||||||
def __init__(self, config: HostConfig) -> None:
|
def __init__(self, config: HostConfig) -> None:
|
||||||
self._config = config
|
self._config = config
|
||||||
self._service_config_by_name = {
|
self._service_config_by_name = {service_config.name: service_config for service_config in config.services}
|
||||||
service_config.name: service_config for service_config in config.services
|
|
||||||
}
|
|
||||||
self._cli_config_by_name = {cli_config.name: cli_config for cli_config in config.clis}
|
self._cli_config_by_name = {cli_config.name: cli_config for cli_config in config.clis}
|
||||||
|
|
||||||
@property
|
@property
|
||||||
|
@ -48,7 +54,7 @@ class Host(ABC):
|
||||||
raise ValueError(f"Unknown service name: '{service_name}'")
|
raise ValueError(f"Unknown service name: '{service_name}'")
|
||||||
return service_config
|
return service_config
|
||||||
|
|
||||||
def get_cli_config(self, cli_name: str) -> CLIConfig:
|
def get_cli_config(self, cli_name: str, allow_empty: bool = False) -> CLIConfig:
|
||||||
"""Returns config of CLI tool with specified name.
|
"""Returns config of CLI tool with specified name.
|
||||||
|
|
||||||
The CLI must be located on this host.
|
The CLI must be located on this host.
|
||||||
|
@ -60,14 +66,17 @@ class Host(ABC):
|
||||||
Config of the CLI tool.
|
Config of the CLI tool.
|
||||||
"""
|
"""
|
||||||
cli_config = self._cli_config_by_name.get(cli_name)
|
cli_config = self._cli_config_by_name.get(cli_name)
|
||||||
if cli_config is None:
|
if cli_config is None and not allow_empty:
|
||||||
raise ValueError(f"Unknown CLI name: '{cli_name}'")
|
raise ValueError(f"Unknown CLI name: '{cli_name}'")
|
||||||
return cli_config
|
return cli_config
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def get_shell(self) -> Shell:
|
def get_shell(self, sudo: bool = True) -> Shell:
|
||||||
"""Returns shell to this host.
|
"""Returns shell to this host.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
sudo: if True, run all commands in shell with elevated rights
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Shell that executes commands on this host.
|
Shell that executes commands on this host.
|
||||||
"""
|
"""
|
||||||
|
@ -76,6 +85,10 @@ class Host(ABC):
|
||||||
def start_host(self) -> None:
|
def start_host(self) -> None:
|
||||||
"""Starts the host machine."""
|
"""Starts the host machine."""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def get_host_status(self) -> HostStatus:
|
||||||
|
"""Check host status."""
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def stop_host(self, mode: str) -> None:
|
def stop_host(self, mode: str) -> None:
|
||||||
"""Stops the host machine.
|
"""Stops the host machine.
|
||||||
|
@ -104,6 +117,37 @@ class Host(ABC):
|
||||||
service_name: Name of the service to stop.
|
service_name: Name of the service to stop.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def send_signal_to_service(self, service_name: str, signal: str) -> None:
|
||||||
|
"""Send signal to service with specified name using kill -<signal>
|
||||||
|
|
||||||
|
The service must be hosted on this host.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
service_name: Name of the service to stop.
|
||||||
|
signal: signal name. See kill -l to all names
|
||||||
|
"""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def mask_service(self, service_name: str) -> None:
|
||||||
|
"""Prevent the service from start by any activity by masking it.
|
||||||
|
|
||||||
|
The service must be hosted on this host.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
service_name: Name of the service to mask.
|
||||||
|
"""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def unmask_service(self, service_name: str) -> None:
|
||||||
|
"""Allow the service to start by any activity by unmasking it.
|
||||||
|
|
||||||
|
The service must be hosted on this host.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
service_name: Name of the service to unmask.
|
||||||
|
"""
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def restart_service(self, service_name: str) -> None:
|
def restart_service(self, service_name: str) -> None:
|
||||||
"""Restarts the service with specified name and waits until it starts.
|
"""Restarts the service with specified name and waits until it starts.
|
||||||
|
@@ -112,6 +156,30 @@ class Host(ABC):
             service_name: Name of the service to restart.
         """
 
+    @abstractmethod
+    def get_data_directory(self, service_name: str) -> str:
+        """Returns the path to the data directory on the node for further usage
+        (for example, to list databases such as pilorama.db).
+
+        Args:
+            service_name: Name of storage node service.
+        """
+
+    @abstractmethod
+    def wait_success_suspend_process(self, process_name: str) -> None:
+        """Searches for a process ID by its name and suspends the process.
+
+        Args:
+            process_name: Name of the process.
+        """
+
+    @abstractmethod
+    def wait_success_resume_process(self, process_name: str) -> None:
+        """Searches for a process by its ID and resumes the process.
+
+        Args:
+            process_name: Name of the process.
+        """
+
     @abstractmethod
     def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None:
         """Erases all data of the storage node with specified name.
@@ -121,6 +189,81 @@ class Host(ABC):
             cache_only: To delete cache only.
         """
 
+    @abstractmethod
+    def wipefs_storage_node_data(self, service_name: str) -> None:
+        """Erases all data of the storage node with specified name.
+
+        Args:
+            service_name: Name of storage node service.
+        """
+
+    def finish_wipefs(self, service_name: str) -> None:
+        """Erases all data of the storage node with specified name.
+
+        Args:
+            service_name: Name of storage node service.
+        """
+
+    @abstractmethod
+    def delete_fstree(self, service_name: str) -> None:
+        """Deletes all fstrees in the node.
+
+        Args:
+            service_name: Name of storage node service.
+        """
+
+    @abstractmethod
+    def delete_metabase(self, service_name: str) -> None:
+        """Deletes all metabase*.db in the node.
+
+        Args:
+            service_name: Name of storage node service.
+        """
+
+    @abstractmethod
+    def delete_write_cache(self, service_name: str) -> None:
+        """Deletes all write_cache in the node.
+
+        Args:
+            service_name: Name of storage node service.
+        """
+
+    @abstractmethod
+    def delete_blobovnicza(self, service_name: str) -> None:
+        """Deletes all blobovniczas in the node.
+
+        Args:
+            service_name: Name of storage node service.
+        """
+
+    @abstractmethod
+    def delete_file(self, file_path: str) -> None:
+        """Deletes the file at the provided path.
+
+        Args:
+            file_path: Full path to the file to delete.
+        """
+
+    @abstractmethod
+    def is_file_exist(self, file_path: str) -> bool:
+        """Checks if the file exists.
+
+        Args:
+            file_path: Full path to the file to check.
+        """
+
     @abstractmethod
     def detach_disk(self, device: str) -> DiskInfo:
         """Detaches disk device to simulate disk offline/failover scenario.
@@ -172,12 +315,38 @@ class Host(ABC):
             filter_regex: regex to filter output
         """
 
+    @abstractmethod
+    def get_filtered_logs(
+        self,
+        filter_regex: str,
+        since: Optional[datetime] = None,
+        until: Optional[datetime] = None,
+        unit: Optional[str] = None,
+        exclude_filter: Optional[str] = None,
+        priority: Optional[str] = None,
+    ) -> str:
+        """Gets logs from the host filtered by regex.
+
+        Args:
+            filter_regex: Regex filter for logs.
+            since: If set, limits the time from which logs should be collected. Must be in UTC.
+            until: If set, limits the time until which logs should be collected. Must be in UTC.
+            unit: Required systemd unit.
+            priority: Log level: 0 - emergency, 7 - debug. Includes all messages with that code and higher;
+                for example, with the -p 2 option journalctl shows all messages with levels 2, 1 and 0.
+
+        Returns:
+            Found entries as str if any found.
+            Empty string otherwise.
+        """
+
     @abstractmethod
     def is_message_in_logs(
         self,
         message_regex: str,
         since: Optional[datetime] = None,
         until: Optional[datetime] = None,
+        unit: Optional[str] = None,
     ) -> bool:
         """Checks logs on host for specified message regex.
@@ -190,3 +359,35 @@ class Host(ABC):
             True if message found in logs in the given time frame.
             False otherwise.
         """
 
+    @abstractmethod
+    def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None:
+        """Waits for the service to be in the specified state.
+
+        Args:
+            systemd_service_name: Service to wait state of.
+            expected_state: State to wait for.
+            timeout: Seconds to wait.
+        """
+
+    def down_interface(self, interface: str) -> None:
+        shell = self.get_shell()
+        shell.exec(f"ip link set {interface} down")
+
+    def up_interface(self, interface: str) -> None:
+        shell = self.get_shell()
+        shell.exec(f"ip link set {interface} up")
+
+    def check_state(self, interface: str) -> str:
+        shell = self.get_shell()
+        return shell.exec(f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'").stdout.strip()
+
+    @retry(max_attempts=5, sleep_interval=5, expected_result="UP")
+    def check_state_up(self, interface: str) -> str:
+        return self.check_state(interface=interface)
+
+    @retry(max_attempts=5, sleep_interval=5, expected_result="DOWN")
+    def check_state_down(self, interface: str) -> str:
+        return self.check_state(interface=interface)
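The retry-decorated checks poll the interface state until it matches the expected value. A hedged sketch of a failover-style test built on these helpers; the `host` object and the interface name are assumptions of this example:

# Hypothetical flow exercising the new network-interface helpers.
host.down_interface("eth1")
assert host.check_state_down("eth1") == "DOWN"   # polled up to 5 times, 5 s apart
# ... run assertions while the interface is offline ...
host.up_interface("eth1")
assert host.check_state_up("eth1") == "UP"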
src/frostfs_testlib/http/__init__.py (new, empty file)

src/frostfs_testlib/http/http_client.py (new file, 97 lines)
@@ -0,0 +1,97 @@
import json
import logging
import logging.config

import httpx

from frostfs_testlib import reporter

timeout = httpx.Timeout(60, read=150)
LOGGING_CONFIG = {
    "disable_existing_loggers": False,
    "version": 1,
    "handlers": {"default": {"class": "logging.StreamHandler", "formatter": "http", "stream": "ext://sys.stderr"}},
    "formatters": {
        "http": {
            "format": "%(levelname)s [%(asctime)s] %(name)s - %(message)s",
            "datefmt": "%Y-%m-%d %H:%M:%S",
        }
    },
    "loggers": {
        "httpx": {
            "handlers": ["default"],
            "level": "DEBUG",
        },
        "httpcore": {
            "handlers": ["default"],
            "level": "ERROR",
        },
    },
}

logging.config.dictConfig(LOGGING_CONFIG)
logger = logging.getLogger("NeoLogger")


class HttpClient:
    @reporter.step("Send {method} request to {url}")
    def send(self, method: str, url: str, expected_status_code: int = None, **kwargs: dict) -> httpx.Response:
        transport = httpx.HTTPTransport(verify=False, retries=5)
        client = httpx.Client(timeout=timeout, transport=transport)
        response = client.request(method, url, **kwargs)

        self._attach_response(response)
        logger.info(f"Response: {response.status_code} => {response.text}")

        if expected_status_code:
            assert response.status_code == expected_status_code, (
                f"Got {response.status_code} response code" f" while {expected_status_code} expected"
            )

        return response

    @classmethod
    def _attach_response(cls, response: httpx.Response):
        request = response.request

        try:
            request_headers = json.dumps(dict(request.headers), indent=4)
        except json.JSONDecodeError:
            request_headers = str(request.headers)

        try:
            request_body = request.read()
            try:
                request_body = request_body.decode("utf-8")
            except UnicodeDecodeError as e:
                request_body = f"Unable to decode binary data to text using UTF-8 encoding: {str(e)}"
        except Exception as e:
            request_body = f"Error reading request body: {str(e)}"

        request_body = "" if request_body is None else request_body

        try:
            response_headers = json.dumps(dict(response.headers), indent=4)
        except json.JSONDecodeError:
            response_headers = str(response.headers)

        report = (
            f"Method: {request.method}\n\n"
            f"URL: {request.url}\n\n"
            f"Request Headers: {request_headers}\n\n"
            f"Request Body: {request_body}\n\n"
            f"Response Status Code: {response.status_code}\n\n"
            f"Response Headers: {response_headers}\n\n"
            f"Response Body: {response.text}\n\n"
        )
        curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body)

        reporter.attach(report, "Requests Info")
        reporter.attach(curl_request, "CURL")

    @classmethod
    def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str) -> str:
        headers = " ".join(f'-H "{name.title()}: {value}"' for name, value in headers.items())
        data = f" -d '{data}'" if data else ""
        # Option -k means no verify SSL
        return f"curl {url} -X {method} {headers}{data} -k"
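A short usage sketch for the new client; the URL and expected status code are placeholders, not part of the testlib API:

# Hypothetical call; the request/response pair and an equivalent curl command
# are attached to the test report by _attach_response().
client = HttpClient()
response = client.send("GET", "http://example.com/health", expected_status_code=200)
print(response.status_code, response.text)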
src/frostfs_testlib/load/__init__.py (new file, 15 lines)
@@ -0,0 +1,15 @@
from frostfs_testlib.load.interfaces.loader import Loader
from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner
from frostfs_testlib.load.load_config import (
    EndpointSelectionStrategy,
    K6ProcessAllocationStrategy,
    LoadParams,
    LoadScenario,
    LoadType,
    NodesSelectionStrategy,
    Preset,
    ReadFrom,
)
from frostfs_testlib.load.load_report import LoadReport
from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader
from frostfs_testlib.load.runners import DefaultRunner, LocalRunner, S3LocalRunner
src/frostfs_testlib/load/interfaces/loader.py (new file, 14 lines)
@@ -0,0 +1,14 @@
from abc import ABC, abstractmethod

from frostfs_testlib.shell.interfaces import Shell


class Loader(ABC):
    @abstractmethod
    def get_shell(self) -> Shell:
        """Get shell for the loader"""

    @property
    @abstractmethod
    def ip(self):
        """Get address of the loader"""
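A minimal sketch of a concrete implementation of this interface; make_ssh_shell is a hypothetical helper, not a testlib API:

class StaticLoader(Loader):
    """Hypothetical loader pinned to one address."""

    def __init__(self, address: str):
        self._address = address

    def get_shell(self) -> Shell:
        return make_ssh_shell(self._address)  # assumed helper, swap in any Shell factory

    @property
    def ip(self):
        return self._address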
src/frostfs_testlib/load/interfaces/scenario_runner.py (new file, 55 lines)
@@ -0,0 +1,55 @@
from abc import ABC, abstractmethod

from frostfs_testlib.load.interfaces.loader import Loader
from frostfs_testlib.load.k6 import K6
from frostfs_testlib.load.load_config import LoadParams
from frostfs_testlib.storage.cluster import ClusterNode


class ScenarioRunner(ABC):
    @abstractmethod
    def prepare(
        self,
        load_params: LoadParams,
        cluster_nodes: list[ClusterNode],
        nodes_under_load: list[ClusterNode],
        k6_dir: str,
    ):
        """Preparation steps before running the load"""

    @abstractmethod
    def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
        """Init K6 instances"""

    @abstractmethod
    def get_k6_instances(self) -> list[K6]:
        """Get K6 instances"""

    @abstractmethod
    def start(self):
        """Start K6 instances"""

    @abstractmethod
    def stop(self):
        """Stop K6 instances"""

    @abstractmethod
    def preset(self):
        """Run preset for load"""

    @property
    @abstractmethod
    def is_running(self) -> bool:
        """Returns True if load is running at the moment"""

    @abstractmethod
    def wait_until_finish(self, soft_timeout: int = 0):
        """Wait until load is finished"""

    @abstractmethod
    def get_results(self) -> dict:
        """Get results from K6 run"""

    @abstractmethod
    def get_loaders(self) -> list[Loader]:
        """Return loaders"""
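Taken together, the abstract methods imply a driver flow like the following sketch; runner, params, the node lists, and paths are assumptions of this example, not a prescribed API:

# Hypothetical end-to-end flow over any ScenarioRunner implementation.
runner.prepare(params, cluster_nodes, nodes_under_load, k6_dir="/opt/k6")
runner.preset()                            # pre-create containers/objects
runner.init_k6_instances(params, endpoints, k6_dir="/opt/k6")
runner.start()
runner.wait_until_finish()                 # blocks until the load completes
results = runner.get_results()             # merged K6 summaries
assert not runner.is_running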
src/frostfs_testlib/load/interfaces/summarized.py (new file, 96 lines)
@@ -0,0 +1,96 @@
from dataclasses import dataclass, field

from frostfs_testlib.load.load_config import LoadParams, LoadScenario
from frostfs_testlib.load.load_metrics import get_metrics_object


@dataclass
class SummarizedErorrs:
    total: int = field(default_factory=int)
    percent: float = field(default_factory=float)
    threshold: float = field(default_factory=float)
    by_node: dict[str, int] = field(default_factory=dict)

    def calc_stats(self, operations):
        self.total += sum(self.by_node.values())

        if not operations:
            return

        self.percent = self.total / operations * 100


@dataclass
class SummarizedLatencies:
    avg: float = field(default_factory=float)
    min: float = field(default_factory=float)
    max: float = field(default_factory=float)
    by_node: dict[str, dict[str, int]] = field(default_factory=dict)

    def calc_stats(self):
        if not self.by_node:
            return

        avgs = [lt["avg"] for lt in self.by_node.values()]
        self.avg = sum(avgs) / len(avgs)

        minimal = [lt["min"] for lt in self.by_node.values()]
        self.min = min(minimal)

        maximum = [lt["max"] for lt in self.by_node.values()]
        self.max = max(maximum)


@dataclass
class SummarizedStats:
    threads: int = field(default_factory=int)
    requested_rate: int = field(default_factory=int)
    operations: int = field(default_factory=int)
    rate: float = field(default_factory=float)
    throughput: float = field(default_factory=float)
    latencies: SummarizedLatencies = field(default_factory=SummarizedLatencies)
    errors: SummarizedErorrs = field(default_factory=SummarizedErorrs)
    total_bytes: int = field(default_factory=int)
    passed: bool = True

    def calc_stats(self):
        self.errors.calc_stats(self.operations)
        self.latencies.calc_stats()
        self.passed = self.errors.percent <= self.errors.threshold

    @staticmethod
    def collect(load_params: LoadParams, load_summaries: dict) -> dict[str, "SummarizedStats"]:
        if load_params.scenario in [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]:
            delete_vus = max(load_params.preallocated_deleters or 0, load_params.max_deleters or 0)
            write_vus = max(load_params.preallocated_writers or 0, load_params.max_writers or 0)
            read_vus = max(load_params.preallocated_readers or 0, load_params.max_readers or 0)
        else:
            write_vus = load_params.writers
            read_vus = load_params.readers
            delete_vus = load_params.deleters

        summarized = {
            "Write": SummarizedStats(threads=write_vus, requested_rate=load_params.write_rate),
            "Read": SummarizedStats(threads=read_vus, requested_rate=load_params.read_rate),
            "Delete": SummarizedStats(threads=delete_vus, requested_rate=load_params.delete_rate),
        }

        for node_key, load_summary in load_summaries.items():
            metrics = get_metrics_object(load_params.scenario, load_summary)
            for operation in metrics.operations:
                target = summarized[operation._NAME]
                if not operation.total_iterations:
                    continue
                target.operations += operation.total_iterations
                target.rate += operation.rate
                target.latencies.by_node[node_key] = operation.latency
                target.throughput += operation.throughput
                target.errors.threshold = load_params.error_threshold
                target.total_bytes += operation.total_bytes
                if operation.failed_iterations:
                    target.errors.by_node[node_key] = operation.failed_iterations

        for operation in summarized.values():
            operation.calc_stats()

        return summarized
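To make the pass/fail rule concrete, a worked example with invented numbers:

# Invented numbers, for illustration only.
errors = SummarizedErorrs(threshold=1.0, by_node={"node1": 3, "node2": 2})
errors.calc_stats(operations=1000)
print(errors.total)    # 5
print(errors.percent)  # 5 / 1000 * 100 = 0.5
print(errors.percent <= errors.threshold)  # True -> this run would pass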
src/frostfs_testlib/load/k6.py
@@ -1,25 +1,27 @@
 import json
 import logging
+import math
 import os
-from dataclasses import dataclass, fields
+from dataclasses import dataclass
+from datetime import datetime
+from threading import Event
 from time import sleep
 from typing import Any
+from urllib.parse import urlparse
 
-from frostfs_testlib.load.load_config import (
-    K6ProcessAllocationStrategy,
-    LoadParams,
-    LoadScenario,
-    LoadType,
-)
+from frostfs_testlib import reporter
+from frostfs_testlib.credentials.interfaces import User
+from frostfs_testlib.load.interfaces.loader import Loader
+from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType
 from frostfs_testlib.processes.remote_process import RemoteProcess
-from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib.resources.common import STORAGE_USER_NAME
+from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, K6_TEARDOWN_PERIOD
 from frostfs_testlib.shell import Shell
-from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+from frostfs_testlib.testing.test_control import wait_for_success
 
 EXIT_RESULT_CODE = 0
 
 logger = logging.getLogger("NeoLogger")
-reporter = get_reporter()
 
 
 @dataclass
@@ -33,8 +35,6 @@ class LoadResults:
 
 class K6:
     _k6_process: RemoteProcess
-    _k6_stop_attempts: int = 5
-    _k6_stop_check_interval: int = 15
 
     def __init__(
         self,
@@ -42,142 +42,193 @@ class K6:
         endpoints: list[str],
         k6_dir: str,
         shell: Shell,
-        load_node: str,
-        wallet: WalletInfo,
+        loader: Loader,
+        user: User,
     ):
         if load_params.scenario is None:
             raise RuntimeError("Scenario should not be none")
 
-        self.load_params: LoadParams = load_params
+        self.load_params = load_params
         self.endpoints = endpoints
-        self.load_node: str = load_node
-        self.shell: Shell = shell
-        self.wallet = wallet
-        self.scenario: LoadScenario = load_params.scenario
+        self.loader = loader
+        self.shell = shell
+        self.user = user
+        self.preset_output: str = ""
         self.summary_json: str = os.path.join(
             self.load_params.working_dir,
-            f"{self.load_params.load_id}_{self.scenario.value}_summary.json",
+            f"{self.load_params.load_id}_{self.load_params.scenario.value}_summary.json",
         )
 
         self._k6_dir: str = k6_dir
+
+        command = (
+            f"{self._generate_env_variables()}{self._k6_dir}/k6 run {self._generate_k6_variables()} "
+            f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js"
+        )
+        remote_user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None
+        process_id = self.load_params.load_id if self.load_params.scenario != LoadScenario.VERIFY else f"{self.load_params.load_id}_verify"
+        self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, remote_user, process_id)
+
+    def _get_fill_percents(self):
+        fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs | grep data").stdout.split("\n")
+        return [line.split() for line in fill_percents][:-1]
+
+    def check_fill_percent(self):
+        fill_percents = self._get_fill_percents()
+        percent_mean = 0
+        for line in fill_percents:
+            percent_mean += float(line[1].split("%")[0])
+        percent_mean = percent_mean / len(fill_percents)
+        logger.info(f"{self.loader.ip} mean fill percent is {percent_mean}")
+        return percent_mean >= self.load_params.fill_percent
 
     @property
     def process_dir(self) -> str:
         return self._k6_process.process_dir
 
-    @reporter.step_deco("Preset containers and objects")
     def preset(self) -> str:
-        preset_grpc = f"{self._k6_dir}/scenarios/preset/preset_grpc.py"
-        preset_s3 = f"{self._k6_dir}/scenarios/preset/preset_s3.py"
-        preset_map = {
-            LoadType.gRPC: preset_grpc,
-            LoadType.S3: preset_s3,
-            LoadType.HTTP: preset_grpc,
-        }
-
-        base_args = {
-            preset_grpc: [
-                preset_grpc,
-                f"--endpoint {self.endpoints[0]}",
-                f"--wallet {self.wallet.path} ",
-                f"--config {self.wallet.config_path} ",
-            ],
-            preset_s3: [
-                preset_s3,
-                f"--endpoint {self.endpoints[0]}",
-            ],
-        }
-
-        preset_scenario = preset_map[self.load_params.load_type]
-        command_args = base_args[preset_scenario].copy()
-
-        command_args += [
-            f"--{field.metadata['preset_argument']} '{getattr(self.load_params, field.name)}'"
-            for field in fields(self.load_params)
-            if field.metadata
-            and self.scenario in field.metadata["applicable_scenarios"]
-            and field.metadata["preset_argument"]
-            and getattr(self.load_params, field.name) is not None
-        ]
-
-        if self.load_params.preset:
-            command_args += [
-                f"--{field.metadata['preset_argument']} '{getattr(self.load_params.preset, field.name)}'"
-                for field in fields(self.load_params.preset)
-                if field.metadata
-                and self.scenario in field.metadata["applicable_scenarios"]
-                and field.metadata["preset_argument"]
-                and getattr(self.load_params.preset, field.name) is not None
-            ]
-
-        command = " ".join(command_args)
-        result = self.shell.exec(command)
-
-        assert (
-            result.return_code == EXIT_RESULT_CODE
-        ), f"Return code of preset is not zero: {result.stdout}"
-        return result.stdout.strip("\n")
+        with reporter.step(f"Run preset on loader {self.loader.ip} for endpoints {self.endpoints}"):
+            preset_grpc = f"{self._k6_dir}/scenarios/preset/preset_grpc.py"
+            preset_s3 = f"{self._k6_dir}/scenarios/preset/preset_s3.py"
+            preset_map = {
+                LoadType.gRPC: preset_grpc,
+                LoadType.S3: preset_s3,
+                LoadType.HTTP: preset_grpc,
+            }
+
+            base_args = {
+                preset_grpc: [
+                    preset_grpc,
+                    f"--endpoint {','.join(self.endpoints)}",
+                    f"--wallet {self.user.wallet.path} ",
+                    f"--config {self.user.wallet.config_path} ",
+                ],
+                preset_s3: [
+                    preset_s3,
+                    f"--endpoint {','.join(self.endpoints)}",
+                ],
+            }
+
+            preset_scenario = preset_map[self.load_params.load_type]
+            command_args = base_args[preset_scenario].copy()
+
+            command_args += self.load_params.get_preset_arguments()
+
+            command = " ".join(command_args)
+            result = self.shell.exec(command)
+
+            assert result.return_code == EXIT_RESULT_CODE, f"Return code of preset is not zero: {result.stdout}"
+
+            self.preset_output = result.stdout.strip("\n")
+            return self.preset_output
 
-    @reporter.step_deco("Generate K6 command")
-    def _generate_env_variables(self) -> str:
-        env_vars = {
-            field.metadata["env_variable"]: getattr(self.load_params, field.name)
-            for field in fields(self.load_params)
-            if field.metadata
-            and self.scenario in field.metadata["applicable_scenarios"]
-            and field.metadata["env_variable"]
-            and getattr(self.load_params, field.name) is not None
-        }
-
-        if self.load_params.preset:
-            env_vars.update(
-                {
-                    field.metadata["env_variable"]: getattr(self.load_params.preset, field.name)
-                    for field in fields(self.load_params.preset)
-                    if field.metadata
-                    and self.scenario in field.metadata["applicable_scenarios"]
-                    and field.metadata["env_variable"]
-                    and getattr(self.load_params.preset, field.name) is not None
-                }
-            )
+    @reporter.step("Generate K6 variables")
+    def _generate_k6_variables(self) -> str:
+        env_vars = self.load_params.get_k6_vars()
 
         env_vars[f"{self.load_params.load_type.value.upper()}_ENDPOINTS"] = ",".join(self.endpoints)
         env_vars["SUMMARY_JSON"] = self.summary_json
 
-        reporter.attach(
-            "\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables"
-        )
-        return " ".join(
-            [f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None]
-        )
+        reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables")
+        return " ".join([f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None])
 
-    @reporter.step_deco("Start K6 on initiator")
+    @reporter.step("Generate env variables")
+    def _generate_env_variables(self) -> str:
+        env_vars = self.load_params.get_env_vars()
+        if not env_vars:
+            return ""
+        reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "ENV variables")
+        return " ".join([f"{param}='{value}'" for param, value in env_vars.items() if value is not None]) + " "
+
+    def get_start_time(self) -> datetime:
+        return datetime.fromtimestamp(self._k6_process.start_time())
+
+    def get_end_time(self) -> datetime:
+        return datetime.fromtimestamp(self._k6_process.end_time())
+
     def start(self) -> None:
-        command = (
-            f"{self._k6_dir}/k6 run {self._generate_env_variables()} "
-            f"{self._k6_dir}/scenarios/{self.scenario.value}.js"
-        )
-        self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir)
+        with reporter.step(f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"):
+            self._k6_process.start()
 
-    @reporter.step_deco("Wait until K6 is finished")
-    def wait_until_finished(self, timeout: int = 0, k6_should_be_running: bool = False) -> None:
-        wait_interval = 10
-        if self._k6_process is None:
-            assert "No k6 instances were executed"
-        if k6_should_be_running:
-            assert self._k6_process.running(), "k6 should be running."
-        while timeout >= 0:
-            if not self._k6_process.running():
-                return
-            logger.info(f"K6 is running. Waiting {wait_interval} seconds...")
-            if timeout > 0:
-                sleep(wait_interval)
-                timeout -= wait_interval
-        self._stop()
-        raise TimeoutError(f"Expected K6 finished in {timeout} sec.")
+    def wait_until_finished(self, event: Event, soft_timeout: int = 0) -> None:
+        with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"):
+            if self.load_params.scenario == LoadScenario.VERIFY:
+                timeout = self.load_params.verify_time or 0
+            else:
+                timeout = self.load_params.load_time or 0
+
+            start_time = int(self.get_start_time().timestamp())
+
+            current_time = int(datetime.utcnow().timestamp())
+            working_time = current_time - start_time
+            remaining_time = timeout - working_time
+
+            setup_teardown_time = (
+                int(K6_TEARDOWN_PERIOD) + self.load_params.get_init_time() + int(self.load_params.setup_timeout.replace("s", "").strip())
+            )
+            remaining_time_including_setup_and_teardown = remaining_time + setup_teardown_time
+            timeout = remaining_time_including_setup_and_teardown
+
+            if soft_timeout:
+                timeout = min(timeout, soft_timeout)
+
+            original_timeout = timeout
+
+            timeouts = {
+                "K6 start time": start_time,
+                "Current time": current_time,
+                "K6 working time": working_time,
+                "Remaining time for load": remaining_time,
+                "Setup and teardown": setup_teardown_time,
+                "Remaining time including setup/teardown": remaining_time_including_setup_and_teardown,
+                "Soft timeout": soft_timeout,
+                "Selected timeout": original_timeout,
+            }
+
+            reporter.attach("\n".join([f"{k}: {v}" for k, v in timeouts.items()]), "timeouts.txt")
+
+            min_wait_interval = 10
+            wait_interval = min_wait_interval
+            if self._k6_process is None:
+                assert "No k6 instances were executed"
+
+            while timeout > 0:
+                if not self.load_params.fill_percent is None:
+                    with reporter.step(f"Check the percentage of filling of all data disks on the node"):
+                        if self.check_fill_percent():
+                            logger.info(f"Stopping load because disks are filled more than {self.load_params.fill_percent}%")
+                            event.set()
+                            self.stop()
+                            return
+
+                if event.is_set():
+                    self.stop()
+                    return
+
+                if not self._k6_process.running():
+                    return
+
+                remaining_time_hours = f"{timeout//3600}h" if timeout // 3600 != 0 else ""
+                remaining_time_minutes = f"{timeout//60%60}m" if timeout // 60 % 60 != 0 else ""
+                logger.info(
+                    f"K6 is running. Remaining time {remaining_time_hours}{remaining_time_minutes}{timeout%60}s. Next check after {wait_interval} seconds..."
+                )
+                sleep(wait_interval)
+                timeout -= min(timeout, wait_interval)
+                wait_interval = max(
+                    min(timeout, int(math.log2(timeout + 1)) * 15) - min_wait_interval,
+                    min_wait_interval,
+                )
+
+            if not self._k6_process.running():
+                return
+
+            self.stop()
+            if not soft_timeout:
+                raise TimeoutError(f"Expected K6 to finish after {original_timeout} sec.")
 
     def get_results(self) -> Any:
-        with reporter.step(f"K6 results from {self.load_node}"):
+        with reporter.step(f"Get load results from loader {self.loader.ip} on endpoints {self.endpoints}"):
             self.__log_output()
 
             if not self.summary_json:
@@ -185,53 +236,33 @@ class K6:
 
         summary_text = self.shell.exec(f"cat {self.summary_json}").stdout
         summary_json = json.loads(summary_text)
+        endpoint = urlparse(self.endpoints[0]).netloc or self.endpoints[0]
         allure_filenames = {
-            K6ProcessAllocationStrategy.PER_LOAD_NODE: f"{self.load_node}_{self.scenario.value}_summary.json",
-            K6ProcessAllocationStrategy.PER_ENDPOINT: f"{self.load_node}_{self.scenario.value}_{self.endpoints[0]}_summary.json",
+            K6ProcessAllocationStrategy.PER_LOAD_NODE: f"{self.loader.ip}_{self.load_params.scenario.value}_summary.json",
+            K6ProcessAllocationStrategy.PER_ENDPOINT: f"{self.loader.ip}_{self.load_params.scenario.value}_{endpoint}_summary.json",
         }
         allure_filename = allure_filenames[self.load_params.k6_process_allocation_strategy]
 
         reporter.attach(summary_text, allure_filename)
         return summary_json
 
-    @reporter.step_deco("Assert K6 should be finished")
-    def _k6_should_be_finished(self) -> None:
-        k6_rc = self._k6_process.rc()
-        assert k6_rc == 0, f"K6 unexpectedly finished with RC {k6_rc}"
-
-    @reporter.step_deco("Terminate K6 on initiator")
     def stop(self) -> None:
-        if not self.is_running:
-            self.get_results()
-            raise AssertionError("K6 unexpectedly finished")
-
-        self._stop()
-
-        k6_rc = self._k6_process.rc()
-        assert k6_rc == EXIT_RESULT_CODE, f"Return code of K6 job should be 0, but {k6_rc}"
+        with reporter.step(f"Stop load from loader {self.loader.ip} on endpoints {self.endpoints}"):
+            if self.is_running():
+                self._k6_process.stop()
+
+            self._wait_until_process_end()
 
-    @property
     def is_running(self) -> bool:
         if self._k6_process:
             return self._k6_process.running()
         return False
 
-    @reporter.step_deco("Try to stop K6 with SIGTERM")
-    def _stop(self) -> None:
-        self._k6_process.stop()
-        with reporter.step("Wait until process end"):
-            for _ in range(self._k6_stop_attempts):
-                if not self._k6_process.running():
-                    break
-
-                sleep(self._k6_stop_check_interval)
-            else:
-                raise AssertionError("Can not stop K6 process within timeout")
-
-    def _kill(self) -> None:
-        self._k6_process.kill()
+    @reporter.step("Wait until K6 process end")
+    @wait_for_success(K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout")
+    def _wait_until_process_end(self):
+        return self._k6_process.running()
 
     def __log_output(self) -> None:
         reporter.attach(self._k6_process.stdout(full=True), "K6 stdout")
-        reporter.attach(self._k6_process.stderr(full=True), "K6 stderr")
+        reporter.attach(f"{self._k6_process.process_dir}/stderr", "K6 stderr path")
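A hedged driver sketch for the reworked K6 class; the surrounding objects (load_params, shell, loader, user) and the argument order are assumptions read off the diff:

from threading import Event

k6 = K6(load_params, endpoints, k6_dir, shell, loader, user)  # process is created in __init__
k6.preset()                        # pre-create containers/buckets before the load
k6.start()

stop_event = Event()               # another thread may set this to abort the run
k6.wait_until_finished(stop_event, soft_timeout=0)
summary = k6.get_results()         # parsed summary JSON, also attached to the report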
src/frostfs_testlib/load/load_config.py
@@ -1,7 +1,38 @@
+import math
 import os
-from dataclasses import dataclass, field
+from dataclasses import dataclass, field, fields, is_dataclass
 from enum import Enum
-from typing import Optional
+from types import MappingProxyType
+from typing import Any, Callable, Optional, get_args
+
+from frostfs_testlib.utils.converting_utils import calc_unit
+
+
+def convert_time_to_seconds(time: int | str | None) -> int:
+    if time is None:
+        return None
+    if str(time).isdigit():
+        seconds = int(time)
+    else:
+        days, hours, minutes = 0, 0, 0
+        if "d" in time:
+            days, time = time.split("d")
+        if "h" in time:
+            hours, time = time.split("h")
+        if "min" in time:
+            minutes = time.replace("min", "")
+        seconds = int(days) * 86400 + int(hours) * 3600 + int(minutes) * 60
+    return seconds
+
+
+def force_list(input: str | list[str]):
+    if input is None:
+        return None
+
+    if isinstance(input, list):
+        return list(map(str.strip, input))
+
+    return [input.strip()]
+
 
 class LoadType(Enum):
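convert_time_to_seconds accepts either a bare number of seconds or a duration string; for example:

assert convert_time_to_seconds(300) == 300             # plain seconds pass through
assert convert_time_to_seconds("30min") == 1800
assert convert_time_to_seconds("2h") == 7200
assert convert_time_to_seconds("1d2h30min") == 95400   # 86400 + 7200 + 1800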
@@ -15,8 +46,17 @@ class LoadScenario(Enum):
     gRPC_CAR = "grpc_car"
     S3 = "s3"
     S3_CAR = "s3_car"
+    S3_MULTIPART = "s3_multipart"
+    S3_LOCAL = "s3local"
     HTTP = "http"
     VERIFY = "verify"
+    LOCAL = "local"
+
+
+class ReadFrom(Enum):
+    REGISTRY = "registry"
+    PRESET = "preset"
+    MANUAL = "manual"
 
 
 all_load_scenarios = [
@@ -25,29 +65,57 @@ all_load_scenarios = [
     LoadScenario.HTTP,
     LoadScenario.S3_CAR,
     LoadScenario.gRPC_CAR,
+    LoadScenario.LOCAL,
+    LoadScenario.S3_MULTIPART,
+    LoadScenario.S3_LOCAL,
 ]
 all_scenarios = all_load_scenarios.copy() + [LoadScenario.VERIFY]
 
-constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP]
+constant_vus_scenarios = [
+    LoadScenario.gRPC,
+    LoadScenario.S3,
+    LoadScenario.HTTP,
+    LoadScenario.LOCAL,
+    LoadScenario.S3_MULTIPART,
+    LoadScenario.S3_LOCAL,
+]
 constant_arrival_rate_scenarios = [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]
 
-grpc_preset_scenarios = [LoadScenario.gRPC, LoadScenario.HTTP, LoadScenario.gRPC_CAR]
-s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR]
+grpc_preset_scenarios = [
+    LoadScenario.gRPC,
+    LoadScenario.HTTP,
+    LoadScenario.gRPC_CAR,
+    LoadScenario.LOCAL,
+]
+s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART, LoadScenario.S3_LOCAL]
+
+
+@dataclass
+class MetaField:
+    name: str
+    metadata: MappingProxyType
+    value: Any
 
 
 def metadata_field(
     applicable_scenarios: list[LoadScenario],
     preset_param: Optional[str] = None,
     scenario_variable: Optional[str] = None,
+    string_repr: Optional[bool] = True,
     distributed: Optional[bool] = False,
+    formatter: Optional[Callable] = None,
+    env_variable: Optional[str] = None,
 ):
     return field(
         default=None,
         metadata={
             "applicable_scenarios": applicable_scenarios,
             "preset_argument": preset_param,
-            "env_variable": scenario_variable,
+            "scenario_variable": scenario_variable,
+            "string_repr": string_repr,
             "distributed": distributed,
+            "formatter": formatter,
+            "env_variable": env_variable,
         },
     )

@@ -61,6 +129,8 @@ class NodesSelectionStrategy(Enum):
     ALL_EXCEPT_UNDER_TEST = "ALL_EXCEPT_UNDER_TEST"
     # Select ONE random node except under test (useful for failover).
     RANDOM_SINGLE_EXCEPT_UNDER_TEST = "RANDOM_SINGLE_EXCEPT_UNDER_TEST"
+    # Select node under test
+    NODE_UNDER_TEST = "NODE_UNDER_TEST"
 
 
 class EndpointSelectionStrategy(Enum):

@@ -82,33 +152,75 @@ class K6ProcessAllocationStrategy(Enum):
     PER_ENDPOINT = "PER_ENDPOINT"
 
 
+class MetaConfig:
+    def _get_field_formatter(self, field_name: str) -> Callable | None:
+        data_fields = fields(self)
+        formatters = [
+            field.metadata["formatter"]
+            for field in data_fields
+            if field.name == field_name and "formatter" in field.metadata and field.metadata["formatter"] != None
+        ]
+        if formatters:
+            return formatters[0]
+
+        return None
+
+    def __setattr__(self, field_name, value):
+        formatter = self._get_field_formatter(field_name)
+        if formatter:
+            value = formatter(value)
+
+        super().__setattr__(field_name, value)
+
+
 @dataclass
-class Preset:
+class Preset(MetaConfig):
     # ------ COMMON ------
     # Amount of objects which should be created
-    objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None)
+    objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None, False)
     # Preset json. Filled automatically.
-    pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON")
+    pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False)
     # Workers count for preset
-    workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None)
+    workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False)
+    # ACL for containers/buckets
+    acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False)
 
     # ------ GRPC ------
     # Amount of containers which should be created
-    containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None)
+    containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None, False)
     # Container placement policy for containers for gRPC
-    container_placement_policy: Optional[str] = metadata_field(
-        grpc_preset_scenarios, "policy", None
-    )
+    container_placement_policy: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "policy", None, False, formatter=force_list)
+    # Number of retries for container creation
+    container_creation_retry: Optional[int] = metadata_field(grpc_preset_scenarios, "retry", None, False)
 
     # ------ S3 ------
     # Amount of buckets which should be created
-    buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None)
+    buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None, False)
     # S3 region (AKA placement policy for S3 buckets)
-    s3_location: Optional[str] = metadata_field(s3_preset_scenarios, "location", None)
+    s3_location: Optional[list[str]] = metadata_field(s3_preset_scenarios, "location", None, False, formatter=force_list)
+
+    # Delay between container creation and object upload for preset
+    object_upload_delay: Optional[int] = metadata_field(all_load_scenarios, "sleep", None, False)
+
+    # Flag to control preset errors
+    ignore_errors: Optional[bool] = metadata_field(all_load_scenarios, "ignore-errors", None, False)
+
+    # Flag to ensure created containers store data on local endpoints
+    local: Optional[bool] = metadata_field(grpc_preset_scenarios, "local", None, False)
+
+
+@dataclass
+class PrometheusParams(MetaConfig):
+    # Prometheus server URL
+    server_url: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_SERVER_URL", string_repr=False)
+    # Prometheus trend stats
+    trend_stats: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_TREND_STATS", string_repr=False)
+    # Additional tags
+    metrics_tags: Optional[str] = metadata_field(all_load_scenarios, None, "METRIC_TAGS", False)
 
 
 @dataclass
-class LoadParams:
+class LoadParams(MetaConfig):
     # ------- CONTROL PARAMS -------
     # Load type can be gRPC, HTTP, S3.
     load_type: LoadType
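MetaConfig.__setattr__ applies a field's formatter on every assignment, so values are normalized at set-time; with force_list on Preset.container_placement_policy, for instance:

# Illustration of formatter-on-assignment via MetaConfig.
preset = Preset()
preset.container_placement_policy = "REP 2 IN X"
print(preset.container_placement_policy)   # ['REP 2 IN X'] via force_list

preset.container_placement_policy = [" REP 1 ", "REP 3"]
print(preset.container_placement_policy)   # ['REP 1', 'REP 3'] (stripped)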
@ -125,87 +237,253 @@ class LoadParams:
|
||||||
verify: Optional[bool] = None
|
verify: Optional[bool] = None
|
||||||
# Just id for load so distinct it between runs. Filled automatically.
|
# Just id for load so distinct it between runs. Filled automatically.
|
||||||
load_id: Optional[str] = None
|
load_id: Optional[str] = None
|
||||||
|
# Acceptable number of load errors in %
|
||||||
|
# 100 means 100% errors allowed
|
||||||
|
# 1.5 means 1.5% errors allowed
|
||||||
|
# 0 means no errors allowed
|
||||||
|
error_threshold: Optional[float] = None
|
||||||
# Working directory
|
# Working directory
|
||||||
working_dir: Optional[str] = None
|
working_dir: Optional[str] = None
|
||||||
# Preset for the k6 run
|
# Preset for the k6 run
|
||||||
preset: Optional[Preset] = None
|
preset: Optional[Preset] = None
|
||||||
|
# K6 download url
|
||||||
|
k6_url: Optional[str] = None
|
||||||
|
# Requests module url
|
||||||
|
requests_module_url: Optional[str] = None
|
||||||
|
# aws cli download url
|
||||||
|
awscli_url: Optional[str] = None
|
||||||
|
# No ssl verification flag
|
||||||
|
no_verify_ssl: Optional[bool] = metadata_field(
|
||||||
|
[
|
||||||
|
LoadScenario.S3,
|
||||||
|
LoadScenario.S3_CAR,
|
||||||
|
LoadScenario.S3_MULTIPART,
|
||||||
|
LoadScenario.S3_LOCAL,
|
||||||
|
LoadScenario.VERIFY,
|
||||||
|
LoadScenario.HTTP,
|
||||||
|
],
|
||||||
|
"no-verify-ssl",
|
||||||
|
"NO_VERIFY_SSL",
|
||||||
|
False,
|
||||||
|
)
|
||||||
|
# Percentage of filling of all data disks on all nodes
|
||||||
|
fill_percent: Optional[float] = None
|
||||||
|
# if specified, max payload size in GB of the storage engine. If the storage engine is already full, no new objects will be saved.
|
||||||
|
max_total_size_gb: Optional[float] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "MAX_TOTAL_SIZE_GB")
|
||||||
|
# if set, the payload is generated on the fly and is not read into memory fully.
|
||||||
|
streaming: Optional[int] = metadata_field(all_load_scenarios, None, "STREAMING", False)
|
||||||
|
# Output format
|
||||||
|
output: Optional[str] = metadata_field(all_load_scenarios, None, "K6_OUT", False)
|
||||||
|
# Prometheus params
|
||||||
|
prometheus: Optional[PrometheusParams] = None
|
||||||
|
|
||||||
# ------- COMMON SCENARIO PARAMS -------
|
# ------- COMMON SCENARIO PARAMS -------
|
||||||
# Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value.
|
# Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value.
|
||||||
load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION")
|
load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION", False, formatter=convert_time_to_seconds)
|
||||||
# Object size in KB for load and preset.
|
# Object size in KB for load and preset.
|
||||||
object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE")
|
object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False)
|
||||||
|
# For read operations, controls from which set get objects to read
|
||||||
|
read_from: Optional[ReadFrom] = None
|
||||||
|
# For read operations done from REGISTRY, controls delay which object should live before it will be used for read operation
|
||||||
|
read_age: Optional[int] = metadata_field(all_load_scenarios, None, "READ_AGE", False)
|
||||||
# Output registry K6 file. Filled automatically.
|
# Output registry K6 file. Filled automatically.
|
||||||
registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE")
|
registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False)
|
||||||
|
# In case if we want to use custom registry file left from another load run
|
||||||
|
custom_registry: Optional[str] = None
|
||||||
|
# In case if we want to use custom registry file left from another load run
|
||||||
|
force_fresh_registry: Optional[bool] = None
|
||||||
# Specifies the minimum duration of every single execution (i.e. iteration).
|
# Specifies the minimum duration of every single execution (i.e. iteration).
|
||||||
# Any iterations that are shorter than this value will cause that VU to
|
# Any iterations that are shorter than this value will cause that VU to
|
||||||
# sleep for the remainder of the time until the specified minimum duration is reached.
|
# sleep for the remainder of the time until the specified minimum duration is reached.
|
||||||
min_iteration_duration: Optional[str] = metadata_field(
|
min_iteration_duration: Optional[str] = metadata_field(all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False)
|
||||||
all_load_scenarios, None, "K6_MIN_ITERATION_DURATION"
|
# Prepare/cut objects locally on client before sending
|
||||||
)
|
prepare_locally: Optional[bool] = metadata_field([LoadScenario.gRPC, LoadScenario.gRPC_CAR], None, "PREPARE_LOCALLY", False)
|
||||||
|
# Specifies K6 setupTimeout time. Currently hardcoded in xk6 as 5 seconds for all scenarios
|
||||||
|
# https://k6.io/docs/using-k6/k6-options/reference/#setup-timeout
|
||||||
|
setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT", False)
|
||||||
|
|
||||||
|
# Delay for read operations in case we read from registry
read_age: Optional[int] = metadata_field(all_load_scenarios, None, "READ_AGE", None, False)

# Initialization time for each VU for k6 load
vu_init_time: Optional[float] = None

# ------- CONSTANT VUS SCENARIO PARAMS -------
# Amount of Writers VU.
writers: Optional[int] = metadata_field(constant_vus_scenarios, None, "WRITERS", True, True)
# Amount of Readers VU.
readers: Optional[int] = metadata_field(constant_vus_scenarios, None, "READERS", True, True)
# Amount of Deleters VU.
deleters: Optional[int] = metadata_field(constant_vus_scenarios, None, "DELETERS", True, True)

# ------- CONSTANT ARRIVAL RATE SCENARIO PARAMS -------
# Number of iterations to start during each timeUnit period for write.
write_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "WRITE_RATE", True, True)
# Number of iterations to start during each timeUnit period for read.
read_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "READ_RATE", True, True)
# Number of iterations to start during each timeUnit period for delete.
delete_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "DELETE_RATE", True, True)

# Amount of preAllocatedVUs for write operations.
preallocated_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True, True)
# Amount of maxVUs for write operations.
max_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_WRITERS", False, True)

# Amount of preAllocatedVUs for read operations.
preallocated_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True, True)
# Amount of maxVUs for read operations.
max_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_READERS", False, True)

# Amount of preAllocatedVUs for delete operations.
preallocated_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True, True)
# Amount of maxVUs for delete operations.
max_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True)
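
These declarations rely on the metadata_field helper defined earlier in load_config.py. As a rough sketch of the pattern it wraps (parameter names are assumptions inferred from how the metadata keys are consumed further down; only the keys themselves, "applicable_scenarios", "preset_argument", "env_variable", "scenario_variable", "string_repr", "distributed", are confirmed by this diff):

from dataclasses import field


def metadata_field(applicable_scenarios, preset_param=None, scenario_variable=None, string_repr=True, distributed=False):
    # Sketch only: the real helper lives earlier in this file and may differ.
    # It attaches routing metadata that get_k6_vars/get_env_vars/get_preset_arguments
    # and the distribution logic in the runners consult at runtime.
    return field(
        default=None,
        metadata={
            "applicable_scenarios": applicable_scenarios,
            "preset_argument": preset_param,
            "env_variable": None,  # assumption: likely set via a separate parameter in the real helper
            "scenario_variable": scenario_variable,
            "string_repr": string_repr,
            "distributed": distributed,
        },
    )
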

# Multipart
# Number of parts to upload in parallel
writers_multipart: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITERS_MULTIPART", False, True)
# Part size must be greater than 5 MB
write_object_part_size: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False)

# Period of time to apply the rate value.
time_unit: Optional[str] = metadata_field(constant_arrival_rate_scenarios, None, "TIME_UNIT", False)

# ------- VERIFY SCENARIO PARAMS -------
# Maximum verification time for k6 to verify objects. Default is BACKGROUND_LOAD_MAX_VERIFY_TIME (3600).
verify_time: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "TIME_LIMIT", False)
# Amount of Verification VU.
verify_clients: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "CLIENTS", True, False)

# ------- LOCAL SCENARIO PARAMS -------
# Config file location (filled automatically)
config_file: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_FILE", False)
# Config directory location (filled automatically)
config_dir: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_DIR", False)

def set_id(self, load_id):
    self.load_id = load_id

    if self.read_from == ReadFrom.REGISTRY:
        self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt")

    # For now it's okay to have it this way
    if self.custom_registry is not None:
        self.registry_file = self.custom_registry

    if self.read_from == ReadFrom.PRESET:
        self.registry_file = None

    if self.preset:
        self.preset.pregen_json = os.path.join(self.working_dir, f"{load_id}_prepare.json")
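
The registry selection in set_id above resolves in priority order. A sketch of its outcomes, under an assumed working_dir of "/tmp/load" and load_id "xyz" (both invented for illustration):

# Sketch of set_id() outcomes; working_dir and load_id values are invented.
# read_from == ReadFrom.REGISTRY, custom_registry unset -> registry_file = "/tmp/load/xyz_registry.bolt"
# custom_registry set (any read_from)                   -> registry_file = custom_registry
# read_from == ReadFrom.PRESET                          -> registry_file = None
# preset set                                            -> preset.pregen_json = "/tmp/load/xyz_prepare.json"
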

def get_k6_vars(self):
    env_vars = {
        meta_field.metadata["scenario_variable"]: meta_field.value
        for meta_field in self._get_meta_fields(self)
        if self.scenario in meta_field.metadata["applicable_scenarios"]
        and meta_field.metadata["scenario_variable"]
        and meta_field.value is not None
    }

    return env_vars

def get_env_vars(self):
    env_vars = {
        meta_field.metadata["env_variable"]: meta_field.value
        for meta_field in self._get_meta_fields(self)
        if self.scenario in meta_field.metadata["applicable_scenarios"]
        and meta_field.metadata["env_variable"]
        and meta_field.value is not None
    }

    return env_vars

def __post_init__(self):
    default_scenario_map = {
        LoadType.gRPC: LoadScenario.gRPC,
        LoadType.HTTP: LoadScenario.HTTP,
        LoadType.S3: LoadScenario.S3,
    }

    if self.scenario is None:
        self.scenario = default_scenario_map[self.load_type]

def get_preset_arguments(self):
    command_args = [
        self._get_preset_argument(meta_field)
        for meta_field in self._get_meta_fields(self)
        if self.scenario in meta_field.metadata["applicable_scenarios"]
        and meta_field.metadata["preset_argument"]
        and meta_field.value is not None
        and self._get_preset_argument(meta_field)
    ]

    return command_args

def get_init_time(self) -> int:
    return math.ceil(self._get_total_vus() * self.vu_init_time)

def _get_total_vus(self) -> int:
    vu_fields = ["writers", "preallocated_writers", "readers", "preallocated_readers"]
    data_fields = [getattr(self, field.name) or 0 for field in fields(self) if field.name in vu_fields]
    return sum(data_fields)

def _get_applicable_fields(self):
    applicable_fields = [
        meta_field
        for meta_field in self._get_meta_fields(self)
        if self.scenario in meta_field.metadata["applicable_scenarios"] and meta_field.value
    ]

    return applicable_fields

@staticmethod
def _get_preset_argument(meta_field: MetaField) -> str:
    if isinstance(meta_field.value, bool):
        # For preset calls, bool values are passed with just --<argument_name> if the value is True
        return f"--{meta_field.metadata['preset_argument']}" if meta_field.value else ""

    if isinstance(meta_field.value, list):
        return (
            " ".join(f"--{meta_field.metadata['preset_argument']} '{value}'" for value in meta_field.value) if meta_field.value else ""
        )

    return f"--{meta_field.metadata['preset_argument']} '{meta_field.value}'"
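
The branches in _get_preset_argument map field values onto CLI flags. A sketch of the resulting strings, using a hypothetical preset_argument name "policy" (the name and values are invented for illustration):

# value True               -> "--policy"
# value False              -> ""                                 (boolean flags appear only when True)
# value ["REP 1", "REP 2"] -> "--policy 'REP 1' --policy 'REP 2'"
# value "REP 2"            -> "--policy 'REP 2'"
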

@staticmethod
def _get_meta_fields(instance) -> list[MetaField]:
    data_fields = fields(instance)

    fields_with_data = [
        MetaField(field.name, field.metadata, getattr(instance, field.name))
        for field in data_fields
        if field.metadata and getattr(instance, field.name) is not None
    ]

    for field in data_fields:
        actual_field_type = get_args(field.type)[0] if len(get_args(field.type)) else get_args(field.type)
        if is_dataclass(actual_field_type) and getattr(instance, field.name):
            fields_with_data += LoadParams._get_meta_fields(getattr(instance, field.name))

    return fields_with_data or []

def __str__(self) -> str:
    load_type_str = self.scenario.value if self.scenario else self.load_type.value
    # TODO: migrate load_params defaults to testlib
    if self.object_size is not None:
        size, unit = calc_unit(self.object_size, 1)
        static_params = [f"{load_type_str} {size:.4g} {unit}"]
    else:
        static_params = [f"{load_type_str}"]

    dynamic_params = [
        f"{meta_field.name}={meta_field.value}" for meta_field in self._get_applicable_fields() if meta_field.metadata["string_repr"]
    ]
    params = ", ".join(static_params + dynamic_params)

    return params

def __repr__(self) -> str:
    return self.__str__()

@@ -1,83 +1,54 @@
from abc import ABC
from typing import Any, Optional

from frostfs_testlib.load.load_config import LoadScenario


class OperationMetric(ABC):
    _NAME = ""
    _SUCCESS = ""
    _ERRORS = ""
    _THROUGHPUT = ""
    _LATENCY = ""

    def __init__(self, summary) -> None:
        self.summary = summary
        self.metrics = summary["metrics"]

    @property
    def total_iterations(self) -> int:
        return self._get_metric(self._SUCCESS) + self._get_metric(self._ERRORS)

    @property
    def success_iterations(self) -> int:
        return self._get_metric(self._SUCCESS)

    @property
    def latency(self) -> dict:
        return self._get_metric(self._LATENCY)

    @property
    def rate(self) -> float:
        return self._get_metric_rate(self._SUCCESS)

    @property
    def failed_iterations(self) -> int:
        return self._get_metric(self._ERRORS)

    @property
    def throughput(self) -> float:
        return self._get_metric_rate(self._THROUGHPUT)

    @property
    def total_bytes(self) -> float:
        return self._get_metric(self._THROUGHPUT)

    def _get_metric(self, metric: str) -> int:
        metrics_method_map = {
            "counter": self._get_counter_metric,
            "gauge": self._get_gauge_metric,
            "trend": self._get_trend_metrics,
        }

        if metric not in self.metrics:
            return 0

@@ -85,9 +56,7 @@ class MetricsBase(ABC):
        metric = self.metrics[metric]
        metric_type = metric["type"]
        if metric_type not in metrics_method_map:
            raise Exception(f"Unsupported metric type: {metric_type}, supported: {metrics_method_map.keys()}")

        return metrics_method_map[metric_type](metric)

@@ -100,9 +69,7 @@ class MetricsBase(ABC):
        metric = self.metrics[metric]
        metric_type = metric["type"]
        if metric_type not in metrics_method_map:
            raise Exception(f"Unsupported rate metric type: {metric_type}, supported: {metrics_method_map.keys()}")

        return metrics_method_map[metric_type](metric)

@@ -115,38 +82,149 @@ class MetricsBase(ABC):
    def _get_gauge_metric(self, metric: str) -> int:
        return metric["values"]["value"]

    def _get_trend_metrics(self, metric: str) -> int:
        return metric["values"]


class WriteOperationMetric(OperationMetric):
    _NAME = "Write"
    _SUCCESS = ""
    _ERRORS = ""
    _THROUGHPUT = "data_sent"
    _LATENCY = ""


class ReadOperationMetric(OperationMetric):
    _NAME = "Read"
    _SUCCESS = ""
    _ERRORS = ""
    _THROUGHPUT = "data_received"
    _LATENCY = ""


class DeleteOperationMetric(OperationMetric):
    _NAME = "Delete"
    _SUCCESS = ""
    _ERRORS = ""
    _THROUGHPUT = ""
    _LATENCY = ""


class GrpcWriteOperationMetric(WriteOperationMetric):
    _SUCCESS = "frostfs_obj_put_success"
    _ERRORS = "frostfs_obj_put_fails"
    _LATENCY = "frostfs_obj_put_duration"


class GrpcReadOperationMetric(ReadOperationMetric):
    _SUCCESS = "frostfs_obj_get_success"
    _ERRORS = "frostfs_obj_get_fails"
    _LATENCY = "frostfs_obj_get_duration"


class GrpcDeleteOperationMetric(DeleteOperationMetric):
    _SUCCESS = "frostfs_obj_delete_success"
    _ERRORS = "frostfs_obj_delete_fails"
    _LATENCY = "frostfs_obj_delete_duration"


class S3WriteOperationMetric(WriteOperationMetric):
    _SUCCESS = "aws_obj_put_success"
    _ERRORS = "aws_obj_put_fails"
    _LATENCY = "aws_obj_put_duration"


class S3ReadOperationMetric(ReadOperationMetric):
    _SUCCESS = "aws_obj_get_success"
    _ERRORS = "aws_obj_get_fails"
    _LATENCY = "aws_obj_get_duration"


class S3DeleteOperationMetric(DeleteOperationMetric):
    _SUCCESS = "aws_obj_delete_success"
    _ERRORS = "aws_obj_delete_fails"
    _LATENCY = "aws_obj_delete_duration"


class S3LocalWriteOperationMetric(WriteOperationMetric):
    _SUCCESS = "s3local_obj_put_success"
    _ERRORS = "s3local_obj_put_fails"
    _LATENCY = "s3local_obj_put_duration"


class S3LocalReadOperationMetric(ReadOperationMetric):
    _SUCCESS = "s3local_obj_get_success"
    _ERRORS = "s3local_obj_get_fails"
    _LATENCY = "s3local_obj_get_duration"


class LocalWriteOperationMetric(WriteOperationMetric):
    _SUCCESS = "local_obj_put_success"
    _ERRORS = "local_obj_put_fails"
    _LATENCY = "local_obj_put_duration"


class LocalReadOperationMetric(ReadOperationMetric):
    _SUCCESS = "local_obj_get_success"
    _ERRORS = "local_obj_get_fails"


class LocalDeleteOperationMetric(DeleteOperationMetric):
    _SUCCESS = "local_obj_delete_success"
    _ERRORS = "local_obj_delete_fails"


class VerifyReadOperationMetric(ReadOperationMetric):
    _SUCCESS = "verified_obj"
    _ERRORS = "invalid_obj"


class MetricsBase(ABC):
    def __init__(self) -> None:
        self.write: Optional[WriteOperationMetric] = None
        self.read: Optional[ReadOperationMetric] = None
        self.delete: Optional[DeleteOperationMetric] = None

    @property
    def operations(self) -> list[OperationMetric]:
        return [metric for metric in [self.write, self.read, self.delete] if metric is not None]


class GrpcMetrics(MetricsBase):
    def __init__(self, summary) -> None:
        super().__init__()
        self.write = GrpcWriteOperationMetric(summary)
        self.read = GrpcReadOperationMetric(summary)
        self.delete = GrpcDeleteOperationMetric(summary)


class S3Metrics(MetricsBase):
    def __init__(self, summary) -> None:
        super().__init__()
        self.write = S3WriteOperationMetric(summary)
        self.read = S3ReadOperationMetric(summary)
        self.delete = S3DeleteOperationMetric(summary)


class S3LocalMetrics(MetricsBase):
    def __init__(self, summary) -> None:
        super().__init__()
        self.write = S3LocalWriteOperationMetric(summary)
        self.read = S3LocalReadOperationMetric(summary)


class LocalMetrics(MetricsBase):
    def __init__(self, summary) -> None:
        super().__init__()
        self.write = LocalWriteOperationMetric(summary)
        self.read = LocalReadOperationMetric(summary)
        self.delete = LocalDeleteOperationMetric(summary)


class VerifyMetrics(MetricsBase):
    def __init__(self, summary) -> None:
        super().__init__()
        self.read = VerifyReadOperationMetric(summary)


def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> MetricsBase:
@@ -156,7 +234,10 @@ def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> MetricsBase:
        LoadScenario.HTTP: GrpcMetrics,
        LoadScenario.S3: S3Metrics,
        LoadScenario.S3_CAR: S3Metrics,
        LoadScenario.S3_MULTIPART: S3Metrics,
        LoadScenario.S3_LOCAL: S3LocalMetrics,
        LoadScenario.VERIFY: VerifyMetrics,
        LoadScenario.LOCAL: LocalMetrics,
    }

    return class_map[load_type](summary)
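
A short usage sketch for the class map above; the summary dict is a hand-built stand-in mimicking the k6 JSON summary shape implied by _get_metric, and the exact "values" layout for counters is an assumption:

# Sketch only: fabricated summary, not real k6 output.
summary = {
    "metrics": {
        "aws_obj_put_success": {"type": "counter", "values": {"count": 100, "rate": 5.0}},
        "aws_obj_put_fails": {"type": "counter", "values": {"count": 2, "rate": 0.1}},
    }
}

metrics = get_metrics_object(LoadScenario.S3, summary)
print(metrics.write.total_iterations)  # 102 == success (100) + errors (2)
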

@@ -1,36 +1,43 @@
from datetime import datetime
from typing import Optional

import yaml

from frostfs_testlib.load.interfaces.summarized import SummarizedStats
from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario
from frostfs_testlib.utils.converting_utils import calc_unit


class LoadReport:
    def __init__(self, load_test) -> None:
        self.load_test = load_test
        # List of load summaries dict
        self.load_summaries_list: Optional[list[dict]] = []
        self.load_params: Optional[LoadParams] = None
        self.start_time: Optional[datetime] = None
        self.end_time: Optional[datetime] = None

    def set_start_time(self, time: datetime = None):
        if time is None:
            time = datetime.utcnow()
        self.start_time = time

    def set_end_time(self, time: datetime = None):
        if time is None:
            time = datetime.utcnow()
        self.end_time = time

    def add_summaries(self, load_summaries: dict):
        self.load_summaries_list.append(load_summaries)

    def set_load_params(self, load_params: LoadParams):
        self.load_params = load_params

    def get_report_html(self):
        report_sections = [
            [self.load_params, self._get_load_id_section_html],
            [self.load_test, self._get_load_params_section_html],
            [self.load_summaries_list, self._get_totals_section_html],
            [self.end_time, self._get_test_time_html],
        ]

@@ -42,8 +49,8 @@ class LoadReport:
        return html

    def _get_load_params_section_html(self) -> str:
        params: str = yaml.safe_dump([self.load_test], sort_keys=False, indent=2, explicit_start=True)
        params = params.replace("\n", "<br>").replace(" ", "&nbsp;")
        section_html = f"""<h3>Scenario params</h3>

        <pre>{params}</pre>
@@ -51,25 +58,23 @@ class LoadReport:

        return section_html

    def _get_load_id_section_html(self) -> str:
        section_html = f"""<h3>Load ID: {self.load_params.load_id}</h3>
        <hr>"""

        return section_html

    def _get_test_time_html(self) -> str:
        if not self.start_time or not self.end_time:
            return ""

        html = f"""<h3>Scenario duration</h3>
        {self.start_time} - {self.end_time}<br>
        <hr>
        """

        return html

    def _seconds_to_formatted_duration(self, seconds: int) -> str:
        """Converts N number of seconds to formatted output ignoring zeroes.
        Examples:
@@ -99,167 +104,75 @@ class LoadReport:
        model_map = {
            LoadScenario.gRPC: "closed model",
            LoadScenario.S3: "closed model",
            LoadScenario.S3_MULTIPART: "closed model",
            LoadScenario.HTTP: "closed model",
            LoadScenario.gRPC_CAR: "open model",
            LoadScenario.S3_CAR: "open model",
            LoadScenario.LOCAL: "local fill",
            LoadScenario.S3_LOCAL: "local fill",
        }

        return model_map[self.load_params.scenario]

    def _get_operations_sub_section_html(self, operation_type: str, stats: SummarizedStats):
        throughput_html = ""
        if stats.throughput > 0:
            throughput, unit = calc_unit(stats.throughput)
            throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec")

        bytes_html = ""
        if stats.total_bytes > 0:
            total_bytes, total_bytes_unit = calc_unit(stats.total_bytes)
            bytes_html = self._row("Total transferred", f"{total_bytes:.2f} {total_bytes_unit}")

        per_node_errors_html = ""
        for node_key, errors in stats.errors.by_node.items():
            if self.load_params.k6_process_allocation_strategy == K6ProcessAllocationStrategy.PER_ENDPOINT:
                per_node_errors_html += self._row(f"At {node_key}", errors)

        latency_html = ""
        for node_key, latencies in stats.latencies.by_node.items():
            latency_values = "N/A"
            if latencies:
                latency_values = ""
                for param_name, param_val in latencies.items():
                    latency_values += f"{param_name}={param_val:.2f}ms "

            latency_html += self._row(f"{operation_type} latency {node_key.split(':')[0]}", latency_values)

        object_size, object_size_unit = calc_unit(self.load_params.object_size, 1)
        duration = self._seconds_to_formatted_duration(self.load_params.load_time)
        model = self._get_model_string()
        requested_rate_str = f"{stats.requested_rate}op/sec" if stats.requested_rate else ""
        # write 8KB 15h49m 50op/sec 50th open model/closed model/min_iteration duration=1s - 1.636MB/s 199.57451/s
        short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {stats.threads}th {model} - {throughput:.2f}{unit}/s {stats.rate:.2f}/s"

        html = f"""
        <table border="1" cellpadding="5px"><tbody>
        <tr><th colspan="2" bgcolor="gainsboro">{short_summary}</th></tr>
        <tr><th colspan="2" bgcolor="gainsboro">Metrics</th></tr>
        {self._row("Total operations", stats.operations)}
        {self._row("OP/sec", f"{stats.rate:.2f}")}
        {bytes_html}
        {throughput_html}
        {latency_html}
        <tr><th colspan="2" bgcolor="gainsboro">Errors</th></tr>
        {per_node_errors_html}
        {self._row("Total", f"{stats.errors.total} ({stats.errors.percent:.2f}%)")}
        {self._row("Threshold", f"{stats.errors.threshold:.2f}%")}
        </tbody></table><br><hr>
        """

        return html

    def _get_totals_section_html(self):
        html = ""
        for i in range(len(self.load_summaries_list)):
            html += f"<h3>Load Results for load #{i+1}</h3>"

            summarized = SummarizedStats.collect(self.load_params, self.load_summaries_list[i])
            for operation_type, stats in summarized.items():
                if stats.operations:
                    html += self._get_operations_sub_section_html(operation_type, stats)

        return html
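
A rough end-to-end sketch of the reporting flow this class supports (inputs are placeholders; load_params and the per-process load_summaries would come from the runner):

# Sketch only: payloads are placeholders.
report = LoadReport(load_test={"scenario": "grpc"})
report.set_load_params(load_params)
report.set_start_time()
# ... run the load, gather per-k6-process summaries ...
report.add_summaries(load_summaries)  # may be called once per load run
report.set_end_time()
html = report.get_report_html()
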

@@ -1,184 +0,0 @@
(This hunk removes an entire file; its helpers reappear in the new src/frostfs_testlib/load/loaders.py and src/frostfs_testlib/load/runners.py added below.)

import copy
import itertools
import math
import re
from dataclasses import fields

from frostfs_testlib.cli import FrostfsAuthmate
from frostfs_testlib.load.k6 import K6
from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_VUS_COUNT_DIVISOR
from frostfs_testlib.shell import CommandOptions, SSHShell
from frostfs_testlib.shell.interfaces import InteractiveInput, SshCredentials
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo

reporter = get_reporter()

STOPPED_HOSTS = []


@reporter.step_deco("Init s3 client on load nodes")
def init_s3_client(
    load_nodes: list[str],
    load_params: LoadParams,
    k6_directory: str,
    ssh_credentials: SshCredentials,
    nodes_under_load: list[ClusterNode],
    wallet: WalletInfo,
):
    storage_node = nodes_under_load[0].service(StorageNode)
    s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in nodes_under_load]
    grpc_peer = storage_node.get_rpc_endpoint()

    for load_node in load_nodes:
        ssh_client = _get_ssh_client(ssh_credentials, load_node)
        frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(ssh_client, FROSTFS_AUTHMATE_EXEC)
        issue_secret_output = frostfs_authmate_exec.secret.issue(
            wallet=wallet.path,
            peer=grpc_peer,
            bearer_rules=f"{k6_directory}/scenarios/files/rules.json",
            gate_public_key=s3_public_keys,
            container_placement_policy=load_params.preset.container_placement_policy,
            container_policy=f"{k6_directory}/scenarios/files/policy.json",
            wallet_password=wallet.password,
        ).stdout
        aws_access_key_id = str(
            re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id")
        )
        aws_secret_access_key = str(
            re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output).group("aws_secret_access_key")
        )
        # prompt_pattern doesn't work at the moment
        configure_input = [
            InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id),
            InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key),
            InteractiveInput(prompt_pattern=r".*", input=""),
            InteractiveInput(prompt_pattern=r".*", input=""),
        ]
        ssh_client.exec("aws configure", CommandOptions(interactive_inputs=configure_input))


@reporter.step_deco("Prepare K6 instances and objects")
def prepare_k6_instances(
    load_nodes: list[str],
    ssh_credentials: SshCredentials,
    k6_dir: str,
    load_params: LoadParams,
    endpoints: list[str],
    loaders_wallet: WalletInfo,
) -> list[K6]:
    k6_load_objects: list[K6] = []
    nodes = itertools.cycle(load_nodes)

    k6_distribution_count = {
        K6ProcessAllocationStrategy.PER_LOAD_NODE: len(load_nodes),
        K6ProcessAllocationStrategy.PER_ENDPOINT: len(endpoints),
    }
    endpoints_generators = {
        K6ProcessAllocationStrategy.PER_LOAD_NODE: itertools.cycle([endpoints]),
        K6ProcessAllocationStrategy.PER_ENDPOINT: itertools.cycle([[endpoint] for endpoint in endpoints]),
    }
    k6_processes_count = k6_distribution_count[load_params.k6_process_allocation_strategy]
    endpoints_gen = endpoints_generators[load_params.k6_process_allocation_strategy]

    distributed_load_params_list = _get_distributed_load_params_list(load_params, k6_processes_count)

    for distributed_load_params in distributed_load_params_list:
        load_node = next(nodes)
        ssh_client = _get_ssh_client(ssh_credentials, load_node)
        k6_load_object = K6(
            distributed_load_params,
            next(endpoints_gen),
            k6_dir,
            ssh_client,
            load_node,
            loaders_wallet,
        )
        k6_load_objects.append(k6_load_object)
        if load_params.preset:
            k6_load_object.preset()

    return k6_load_objects


def _get_ssh_client(ssh_credentials: SshCredentials, load_node: str):
    ssh_client = SSHShell(
        host=load_node,
        login=ssh_credentials.ssh_login,
        password=ssh_credentials.ssh_password,
        private_key_path=ssh_credentials.ssh_key_path,
        private_key_passphrase=ssh_credentials.ssh_key_passphrase,
    )

    return ssh_client


def _get_distributed_load_params_list(original_load_params: LoadParams, workers_count: int) -> list[LoadParams]:
    divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR)
    distributed_load_params: list[LoadParams] = []

    for i in range(workers_count):
        load_params = copy.deepcopy(original_load_params)
        # Append #i here in case multiple k6 processes go onto the same load node
        load_params.set_id(f"{load_params.load_id}_{i}")
        distributed_load_params.append(load_params)

    load_fields = fields(original_load_params)

    for field in load_fields:
        if (
            field.metadata
            and original_load_params.scenario in field.metadata["applicable_scenarios"]
            and field.metadata["distributed"]
            and getattr(original_load_params, field.name) is not None
        ):
            original_value = getattr(original_load_params, field.name)
            distribution = _get_distribution(math.ceil(original_value / divisor), workers_count)
            for i in range(workers_count):
                setattr(distributed_load_params[i], field.name, distribution[i])

    return distributed_load_params


def _get_distribution(clients_count: int, workers_count: int) -> list[int]:
    """
    This function distributes X clients to Y workers as evenly as possible.
    For example, if we have 150 readers (clients) and want to spread them over 4 load nodes (workers),
    this will return [38, 38, 37, 37].

    Args:
        clients_count: amount of things that need to be distributed.
        workers_count: amount of workers.

    Returns:
        list of distribution.
    """
    if workers_count < 1:
        raise Exception("Workers cannot be less than 1")

    # Amount of guaranteed payload on one worker
    clients_per_worker = clients_count // workers_count
    # Remainder of clients left to be distributed
    remainder = clients_count - clients_per_worker * workers_count

    distribution = [clients_per_worker + 1 if i < remainder else clients_per_worker for i in range(workers_count)]
    return distribution

@@ -1,36 +1,68 @@
from frostfs_testlib import reporter
from frostfs_testlib.load.interfaces.summarized import SummarizedStats
from frostfs_testlib.load.load_config import LoadParams, LoadScenario
from frostfs_testlib.load.load_metrics import get_metrics_object


class LoadVerifier:
    def __init__(self, load_params: LoadParams) -> None:
        self.load_params = load_params

    def collect_load_issues(self, load_summaries: dict[str, dict]) -> list[str]:
        summarized = SummarizedStats.collect(self.load_params, load_summaries)
        issues = []

        for operation_type, stats in summarized.items():
            if stats.threads and not stats.operations:
                issues.append(f"No any {operation_type.lower()} operation was performed")

            if stats.errors.percent > stats.errors.threshold:
                rate_str = self._get_rate_str(stats.errors.percent)
                issues.append(f"{operation_type} errors exceeded threshold: {rate_str} > {stats.errors.threshold}%")

        return issues

    def collect_verify_issues(self, load_summaries, verification_summaries) -> list[str]:
        verify_issues: list[str] = []
        for k6_process_label in load_summaries:
            with reporter.step(f"Check verify scenario results for {k6_process_label}"):
                verify_issues.extend(
                    self._collect_verify_issues_on_process(
                        k6_process_label,
                        load_summaries[k6_process_label],
                        verification_summaries[k6_process_label],
                    )
                )
        return verify_issues

    def _get_rate_str(self, rate: float, minimal: float = 0.01) -> str:
        return f"{rate:.2f}%" if rate >= minimal else f"~{minimal}%"

    def _collect_verify_issues_on_process(self, label, load_summary, verification_summary) -> list[str]:
        issues = []

        load_metrics = get_metrics_object(self.load_params.scenario, load_summary)

        writers = self.load_params.writers or self.load_params.preallocated_writers or 0
        deleters = self.load_params.deleters or self.load_params.preallocated_deleters or 0

        delete_success = 0

        if deleters > 0:
            delete_success = load_metrics.delete.success_iterations

        if verification_summary:
            verify_metrics = get_metrics_object(LoadScenario.VERIFY, verification_summary)
            verified_objects = verify_metrics.read.success_iterations
            invalid_objects = verify_metrics.read.failed_iterations
            total_left_objects = load_metrics.write.success_iterations - delete_success

            if invalid_objects > 0:
                issues.append(f"There were {invalid_objects} verification fails (hash mismatch).")
            # Due to interruptions we may see total verified objects to be less than written on writers count
            if abs(total_left_objects - verified_objects) > writers:
                issues.append(
                    f"Verified objects mismatch for {label}. Total: {total_left_objects}, Verified: {verified_objects}. Writers: {writers}."
                )

        return issues
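
How the two collectors above might be combined in a test, sketched (the summary dicts are placeholders keyed by k6 process label, matching the iteration in collect_verify_issues):

# Sketch only: load_summaries/verification_summaries contents are placeholders.
verifier = LoadVerifier(load_params)
issues = verifier.collect_load_issues(load_summaries)
issues.extend(verifier.collect_verify_issues(load_summaries, verification_summaries))
assert not issues, "\n".join(issues)
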
src/frostfs_testlib/load/loaders.py (new file, 60 lines)
@@ -0,0 +1,60 @@
from frostfs_testlib.load.interfaces.loader import Loader
from frostfs_testlib.resources.load_params import (
    LOAD_NODE_SSH_PASSWORD,
    LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE,
    LOAD_NODE_SSH_PRIVATE_KEY_PATH,
    LOAD_NODE_SSH_USER,
)
from frostfs_testlib.shell.interfaces import Shell, SshCredentials
from frostfs_testlib.shell.ssh_shell import SSHShell
from frostfs_testlib.storage.cluster import ClusterNode


class RemoteLoader(Loader):
    def __init__(self, ssh_credentials: SshCredentials, ip: str) -> None:
        self.ssh_credentials = ssh_credentials
        self._ip = ip

    @property
    def ip(self):
        return self._ip

    def get_shell(self) -> Shell:
        ssh_client = SSHShell(
            host=self.ip,
            login=self.ssh_credentials.ssh_login,
            password=self.ssh_credentials.ssh_password,
            private_key_path=self.ssh_credentials.ssh_key_path,
            private_key_passphrase=self.ssh_credentials.ssh_key_passphrase,
        )

        return ssh_client

    @classmethod
    def from_ip_list(cls, ip_list: list[str]) -> list[Loader]:
        loaders: list[Loader] = []
        ssh_credentials = SshCredentials(
            LOAD_NODE_SSH_USER,
            LOAD_NODE_SSH_PASSWORD,
            LOAD_NODE_SSH_PRIVATE_KEY_PATH,
            LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE,
        )

        for ip in ip_list:
            loaders.append(RemoteLoader(ssh_credentials, ip))

        return loaders


class NodeLoader(Loader):
    """When ClusterNode is the loader for itself (for Local scenario only)."""

    def __init__(self, cluster_node: ClusterNode) -> None:
        self.cluster_node = cluster_node

    def get_shell(self) -> Shell:
        return self.cluster_node.host.get_shell()

    @property
    def ip(self):
        return self.cluster_node.host_ip
466
src/frostfs_testlib/load/runners.py
Normal file
466
src/frostfs_testlib/load/runners.py
Normal file
|
@ -0,0 +1,466 @@
|
||||||
|
import copy
|
||||||
|
import itertools
|
||||||
|
import math
|
||||||
|
import time
|
||||||
|
from dataclasses import fields
|
||||||
|
from threading import Event
|
||||||
|
from typing import Optional
|
||||||
|
from urllib.parse import urlparse
|
||||||
|
|
||||||
|
from frostfs_testlib import reporter
|
||||||
|
from frostfs_testlib.credentials.interfaces import S3Credentials, User
|
||||||
|
from frostfs_testlib.load.interfaces.loader import Loader
|
||||||
|
from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner
|
||||||
|
from frostfs_testlib.load.k6 import K6
|
||||||
|
from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType
|
||||||
|
from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader
|
||||||
|
from frostfs_testlib.resources import optionals
|
||||||
|
from frostfs_testlib.resources.common import STORAGE_USER_NAME
|
||||||
|
from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_VUS_COUNT_DIVISOR, LOAD_NODE_SSH_USER, LOAD_NODES
|
||||||
|
from frostfs_testlib.shell.command_inspectors import SuInspector
|
||||||
|
from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput
|
||||||
|
from frostfs_testlib.storage.cluster import ClusterNode
|
||||||
|
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
|
||||||
|
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode
|
||||||
|
from frostfs_testlib.testing import parallel, run_optionally
|
||||||
|
from frostfs_testlib.testing.test_control import retry
|
||||||
|
from frostfs_testlib.utils import datetime_utils
|
||||||
|
from frostfs_testlib.utils.file_keeper import FileKeeper
|
||||||
|
|
||||||
|
|
||||||
|
class RunnerBase(ScenarioRunner):
|
||||||
|
k6_instances: list[K6]
|
||||||
|
loaders: list[Loader]
|
||||||
|
|
||||||
|
@reporter.step("Run preset on loaders")
|
||||||
|
def preset(self):
|
||||||
|
parallel([k6.preset for k6 in self.k6_instances])
|
||||||
|
|
||||||
|
@reporter.step("Wait until load finish")
|
||||||
|
def wait_until_finish(self, soft_timeout: int = 0):
|
||||||
|
event = Event()
|
||||||
|
parallel([k6.wait_until_finished for k6 in self.k6_instances], event=event, soft_timeout=soft_timeout)
|
||||||
|
|
||||||
|
@property
|
||||||
|
    def is_running(self):
        futures = parallel([k6.is_running for k6 in self.k6_instances])

        return any([future.result() for future in futures])

    def get_k6_instances(self):
        return self.k6_instances

    def get_loaders(self) -> list[Loader]:
        return self.loaders


class DefaultRunner(RunnerBase):
    user: User

    def __init__(
        self,
        user: User,
        load_ip_list: Optional[list[str]] = None,
    ) -> None:
        if load_ip_list is None:
            load_ip_list = LOAD_NODES
        self.loaders = RemoteLoader.from_ip_list(load_ip_list)
        self.user = user

    @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
    @reporter.step("Preparation steps")
    def prepare(
        self,
        load_params: LoadParams,
        cluster_nodes: list[ClusterNode],
        nodes_under_load: list[ClusterNode],
        k6_dir: str,
    ):
        if load_params.force_fresh_registry and load_params.custom_registry:
            with reporter.step("Forcing fresh registry files"):
                parallel(self._force_fresh_registry, self.loaders, load_params)

        if load_params.load_type != LoadType.S3:
            return

        with reporter.step("Init s3 client on loaders"):
            s3_credentials = self.user.s3_credentials
            parallel(self._aws_configure_on_loader, self.loaders, s3_credentials)

    def _force_fresh_registry(self, loader: Loader, load_params: LoadParams):
        with reporter.step(f"Forcing fresh registry on {loader.ip}"):
            shell = loader.get_shell()
            shell.exec(f"rm -f {load_params.registry_file}")

    def _aws_configure_on_loader(
        self,
        loader: Loader,
        s3_credentials: S3Credentials,
    ):
        with reporter.step(f"Aws configure on {loader.ip}"):
            configure_input = [
                InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=s3_credentials.access_key),
                InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=s3_credentials.secret_key),
                InteractiveInput(prompt_pattern=r".*", input=""),
                InteractiveInput(prompt_pattern=r".*", input=""),
            ]
            loader.get_shell().exec("aws configure", CommandOptions(interactive_inputs=configure_input))

    @reporter.step("Init k6 instances")
    def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
        self.k6_instances = []
        cycled_loaders = itertools.cycle(self.loaders)

        k6_distribution_count = {
            K6ProcessAllocationStrategy.PER_LOAD_NODE: len(self.loaders),
            K6ProcessAllocationStrategy.PER_ENDPOINT: len(endpoints),
        }
        endpoints_generators = {
            K6ProcessAllocationStrategy.PER_LOAD_NODE: itertools.cycle([endpoints]),
            K6ProcessAllocationStrategy.PER_ENDPOINT: itertools.cycle([[endpoint] for endpoint in endpoints]),
        }
        k6_processes_count = k6_distribution_count[load_params.k6_process_allocation_strategy]
        endpoints_gen = endpoints_generators[load_params.k6_process_allocation_strategy]

        distributed_load_params_list = self._get_distributed_load_params_list(load_params, k6_processes_count)

        futures = parallel(
            self._init_k6_instance,
            distributed_load_params_list,
            loader=cycled_loaders,
            endpoints=endpoints_gen,
            k6_dir=k6_dir,
        )
        self.k6_instances = [future.result() for future in futures]

    def _init_k6_instance(self, load_params_for_loader: LoadParams, loader: Loader, endpoints: list[str], k6_dir: str):
        shell = loader.get_shell()
        with reporter.step(f"Init K6 instance on {loader.ip} for endpoints {endpoints}"):
            with reporter.step("Make working directory"):
                shell.exec(f"sudo mkdir -p {load_params_for_loader.working_dir}")
                shell.exec(f"sudo chown {LOAD_NODE_SSH_USER} {load_params_for_loader.working_dir}")

        return K6(
            load_params_for_loader,
            endpoints,
            k6_dir,
            shell,
            loader,
            self.user,
        )

    def _get_distributed_load_params_list(self, original_load_params: LoadParams, workers_count: int) -> list[LoadParams]:
        divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR)
        distributed_load_params: list[LoadParams] = []

        for i in range(workers_count):
            load_params = copy.deepcopy(original_load_params)
            # Append #i here in case multiple k6 processes land on the same load node
            load_params.set_id(f"{load_params.load_id}_{i}")
            distributed_load_params.append(load_params)

        load_fields = fields(original_load_params)

        for field in load_fields:
            if (
                field.metadata
                and original_load_params.scenario in field.metadata["applicable_scenarios"]
                and field.metadata["distributed"]
                and getattr(original_load_params, field.name) is not None
            ):
                original_value = getattr(original_load_params, field.name)
                distribution = self._get_distribution(math.ceil(original_value / divisor), workers_count)
                for i in range(workers_count):
                    setattr(distributed_load_params[i], field.name, distribution[i])

        return distributed_load_params

    def _get_distribution(self, clients_count: int, workers_count: int) -> list[int]:
        """
        Distributes X clients over Y workers as evenly as possible.
        For example, if we have 150 readers (clients) and want to spread them over 4 load nodes (workers),
        this will return [38, 38, 37, 37].

        Args:
            clients_count: number of clients to distribute.
            workers_count: number of workers.

        Returns:
            list with the client count assigned to each worker.
        """
        if workers_count < 1:
            raise Exception("Workers cannot be less than 1")

        # Amount of guaranteed payload on one worker
        clients_per_worker = clients_count // workers_count
        # Remainder of clients left to be distributed
        remainder = clients_count - clients_per_worker * workers_count

        distribution = [clients_per_worker + 1 if i < remainder else clients_per_worker for i in range(workers_count)]
        return distribution
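
    # Worked example for the distribution above (illustrative comment, not part
    # of the library): _get_distribution(150, 4) computes 150 // 4 = 37 clients
    # per worker with remainder 2, so the first two workers each receive one
    # extra client and the result is [38, 38, 37, 37], matching the docstring.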

    def start(self):
        load_params = self.k6_instances[0].load_params

        parallel([k6.start for k6 in self.k6_instances])

        wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5
        with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"):
            time.sleep(wait_after_start_time)

    def stop(self):
        for k6_instance in self.k6_instances:
            k6_instance.stop()

    def get_results(self) -> dict:
        results = {}
        for k6_instance in self.k6_instances:
            if k6_instance.load_params.k6_process_allocation_strategy is None:
                raise RuntimeError("k6_process_allocation_strategy should not be none")

            result = k6_instance.get_results()
            endpoint = urlparse(k6_instance.endpoints[0]).netloc or k6_instance.endpoints[0]
            keys_map = {
                K6ProcessAllocationStrategy.PER_LOAD_NODE: k6_instance.loader.ip,
                K6ProcessAllocationStrategy.PER_ENDPOINT: endpoint,
            }
            key = keys_map[k6_instance.load_params.k6_process_allocation_strategy]
            results[key] = result

        return results

class LocalRunner(RunnerBase):
    cluster_state_controller: ClusterStateController
    file_keeper: FileKeeper
    user: User

    def __init__(
        self,
        cluster_state_controller: ClusterStateController,
        file_keeper: FileKeeper,
        nodes_under_load: list[ClusterNode],
        user: User,
    ) -> None:
        self.cluster_state_controller = cluster_state_controller
        self.file_keeper = file_keeper
        self.loaders = [NodeLoader(node) for node in nodes_under_load]
        self.nodes_under_load = nodes_under_load
        self.user = user

    @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
    @reporter.step("Preparation steps")
    def prepare(
        self,
        load_params: LoadParams,
        cluster_nodes: list[ClusterNode],
        nodes_under_load: list[ClusterNode],
        k6_dir: str,
    ):
        parallel(self.prepare_node, nodes_under_load, k6_dir, load_params)

    @retry(3, 5, expected_result=True)
    def allow_user_to_login_in_system(self, cluster_node: ClusterNode):
        shell = cluster_node.host.get_shell()

        result = None
        try:
            shell.exec(f"sudo chsh -s /bin/bash {STORAGE_USER_NAME}")
            self.lock_passwd_on_node(cluster_node)
            options = CommandOptions(check=False, extra_inspectors=[SuInspector(STORAGE_USER_NAME)])
            result = shell.exec("whoami", options)
        finally:
            if not result or result.return_code:
                self.restore_passwd_on_node(cluster_node)
                return False

        return True

    @reporter.step("Prepare node {cluster_node}")
    def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams):
        shell = cluster_node.host.get_shell()

        with reporter.step("Allow storage user to login into system"):
            self.allow_user_to_login_in_system(cluster_node)

        with reporter.step("Update limits.conf"):
            limits_path = "/etc/security/limits.conf"
            self.file_keeper.add(cluster_node.storage_node, limits_path)
            content = f"{STORAGE_USER_NAME} hard nofile 65536\n{STORAGE_USER_NAME} soft nofile 65536\n"
            shell.exec(f"echo '{content}' | sudo tee {limits_path}")

        with reporter.step("Download K6"):
            shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}")
            shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}")
            shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz --strip-components 2 -C {k6_dir}")
            shell.exec(f"sudo chmod -R 777 {k6_dir}")

        with reporter.step("chmod 777 wallet related files on loader"):
            shell.exec(f"sudo chmod -R 777 {self.user.wallet.config_path}")
            shell.exec(f"sudo chmod -R 777 {self.user.wallet.path}")

    @reporter.step("Init k6 instances")
    def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
        self.k6_instances = []
        futures = parallel(
            self._init_k6_instance,
            self.loaders,
            load_params,
            k6_dir,
        )
        self.k6_instances = [future.result() for future in futures]

    def _init_k6_instance(self, loader: Loader, load_params: LoadParams, k6_dir: str):
        shell = loader.get_shell()
        with reporter.step(f"Init K6 instance on {loader.ip}"):
            with reporter.step("Make working directory"):
                shell.exec(f"sudo mkdir -p {load_params.working_dir}")
                # If we chmod /home/<user_name> folder we can no longer ssh to the node
                # !! IMPORTANT !!
                if (
                    load_params.working_dir
                    and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}"
                    and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/"
                ):
                    shell.exec(f"sudo chmod -R 777 {load_params.working_dir}")

        return K6(
            load_params,
            ["localhost:8080"],
            k6_dir,
            shell,
            loader,
            self.user,
        )

    def start(self):
        load_params = self.k6_instances[0].load_params

        self.cluster_state_controller.stop_services_of_type(S3Gate)
        self.cluster_state_controller.stop_services_of_type(StorageNode)

        parallel([k6.start for k6 in self.k6_instances])

        wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5
        with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"):
            time.sleep(wait_after_start_time)

    @reporter.step("Restore passwd on {cluster_node}")
    def restore_passwd_on_node(self, cluster_node: ClusterNode):
        shell = cluster_node.host.get_shell()
        shell.exec("sudo chattr -i /etc/passwd")

    @reporter.step("Lock passwd on {cluster_node}")
    def lock_passwd_on_node(self, cluster_node: ClusterNode):
        shell = cluster_node.host.get_shell()
        shell.exec("sudo chattr +i /etc/passwd")

    def stop(self):
        for k6_instance in self.k6_instances:
            k6_instance.stop()

        self.cluster_state_controller.start_all_stopped_services()

    def get_results(self) -> dict:
        results = {}
        for k6_instance in self.k6_instances:
            result = k6_instance.get_results()
            results[k6_instance.loader.ip] = result

        parallel(self.restore_passwd_on_node, self.nodes_under_load)

        return results

class S3LocalRunner(LocalRunner):
    endpoints: list[str]
    k6_dir: str

    @reporter.step("Run preset on loaders")
    def preset(self):
        LocalRunner.preset(self)
        with reporter.step("Resolve containers in preset"):
            parallel(self._resolve_containers_in_preset, self.k6_instances)

    @reporter.step("Resolve containers in preset")
    def _resolve_containers_in_preset(self, k6_instance: K6):
        k6_instance.shell.exec(
            f"sudo {self.k6_dir}/scenarios/preset/resolve_containers_in_preset.py --endpoint {k6_instance.endpoints[0]} --preset_file {k6_instance.load_params.preset.pregen_json}"
        )

    @reporter.step("Init k6 instances")
    def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
        self.k6_instances = []
        futures = parallel(
            self._init_k6_instance_,
            self.loaders,
            load_params,
            endpoints,
            k6_dir,
        )
        self.k6_instances = [future.result() for future in futures]

    def _init_k6_instance_(self, loader: Loader, load_params: LoadParams, endpoints: list[str], k6_dir: str):
        shell = loader.get_shell()
        with reporter.step(f"Init K6 instance on {loader.ip} for endpoints {endpoints}"):
            with reporter.step("Make working directory"):
                shell.exec(f"sudo mkdir -p {load_params.working_dir}")
                # If we chmod /home/<user_name> folder we can no longer ssh to the node
                # !! IMPORTANT !!
                if (
                    load_params.working_dir
                    and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}"
                    and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/"
                ):
                    shell.exec(f"sudo chmod -R 777 {load_params.working_dir}")

        return K6(
            load_params,
            self.endpoints,
            k6_dir,
            shell,
            loader,
            self.user,
        )

    @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
    @reporter.step("Preparation steps")
    def prepare(
        self,
        load_params: LoadParams,
        cluster_nodes: list[ClusterNode],
        nodes_under_load: list[ClusterNode],
        k6_dir: str,
    ):
        self.k6_dir = k6_dir
        parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, cluster_nodes)

    @reporter.step("Prepare node {cluster_node}")
    def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, cluster_nodes: list[ClusterNode]):
        LocalRunner.prepare_node(self, cluster_node, k6_dir, load_params)
        self.endpoints = cluster_node.s3_gate.get_all_endpoints()
        shell = cluster_node.host.get_shell()

        with reporter.step("Uninstall previous installation of aws cli"):
            shell.exec(f"sudo rm -rf /usr/local/aws-cli")
            shell.exec(f"sudo rm -rf /usr/local/bin/aws")
            shell.exec(f"sudo rm -rf /usr/local/bin/aws_completer")

        with reporter.step("Install aws cli"):
            shell.exec(f"sudo curl {load_params.awscli_url} -o {k6_dir}/awscliv2.zip")
            shell.exec(f"sudo unzip -q {k6_dir}/awscliv2.zip -d {k6_dir}")
            shell.exec(f"sudo {k6_dir}/aws/install")

        with reporter.step("Install requests python module"):
            shell.exec(f"sudo apt-get -y install python3-pip")
            shell.exec(f"sudo curl -so {k6_dir}/requests.tar.gz {load_params.requests_module_url}")
            shell.exec(f"sudo python3 -m pip install -I {k6_dir}/requests.tar.gz")

        with reporter.step(f"Init s3 client on {cluster_node.host_ip}"):
            configure_input = [
                InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=self.user.s3_credentials.access_key),
                InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=self.user.s3_credentials.secret_key),
                InteractiveInput(prompt_pattern=r".*", input=""),
                InteractiveInput(prompt_pattern=r".*", input=""),
            ]
            shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input))

@@ -1,12 +1,6 @@
-import sys
+from importlib.metadata import entry_points
 from typing import Any

-if sys.version_info < (3, 10):
-    # On Python prior 3.10 we need to use backport of entry points
-    from importlib_metadata import entry_points
-else:
-    from importlib.metadata import entry_points
-
-
 def load_plugin(plugin_group: str, name: str) -> Any:
     """Loads plugin using entry point specification.
@@ -23,3 +17,16 @@ def load_plugin(plugin_group: str, name: str) -> Any:
         return None
     plugin = plugins[name]
     return plugin.load()
+
+
+def load_all(group: str) -> Any:
+    """Loads all plugins using entry point specification.
+
+    Args:
+        group: Name of plugin group.
+
+    Returns:
+        Classes from specified group.
+    """
+    plugins = entry_points(group=group)
+    return [plugin.load() for plugin in plugins]
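
For context, these helpers are consumed through standard Python entry points; a hedged sketch of typical usage, where the group and plugin names are placeholders rather than values taken from this repository:

    # In a plugin package's pyproject.toml:
    #   [project.entry-points."frostfs.testlib.reporter"]
    #   my_handler = "my_pkg.handlers:MyReporterHandler"

    from frostfs_testlib.plugins import load_all, load_plugin

    handler_class = load_plugin("frostfs.testlib.reporter", "my_handler")  # one plugin by name
    all_handlers = load_all(group="frostfs.testlib.reporter")              # every plugin in the group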
@@ -8,28 +8,40 @@ from tenacity import retry
 from tenacity.stop import stop_after_attempt
 from tenacity.wait import wait_fixed

-from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib import reporter
 from frostfs_testlib.shell import Shell
-from frostfs_testlib.shell.interfaces import CommandOptions
-
-reporter = get_reporter()
+from frostfs_testlib.shell.command_inspectors import SuInspector
+from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions


 class RemoteProcess:
-    def __init__(self, cmd: str, process_dir: str, shell: Shell):
+    def __init__(
+        self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector], proc_id: str
+    ):
         self.process_dir = process_dir
         self.cmd = cmd
         self.stdout_last_line_number = 0
         self.stderr_last_line_number = 0
         self.pid: Optional[str] = None
         self.proc_rc: Optional[int] = None
+        self.proc_start_time: Optional[int] = None
+        self.proc_end_time: Optional[int] = None
         self.saved_stdout: Optional[str] = None
         self.saved_stderr: Optional[str] = None
         self.shell = shell
+        self.proc_id: str = proc_id
+        self.cmd_inspectors: list[CommandInspector] = [cmd_inspector] if cmd_inspector else []

     @classmethod
-    @reporter.step_deco("Create remote process")
-    def create(cls, command: str, shell: Shell, working_dir: str = "/tmp") -> RemoteProcess:
+    @reporter.step("Create remote process")
+    def create(
+        cls,
+        command: str,
+        shell: Shell,
+        working_dir: str = "/tmp",
+        user: Optional[str] = None,
+        proc_id: Optional[str] = None,
+    ) -> RemoteProcess:
         """
         Create a process on a remote host.
@@ -39,6 +51,8 @@ class RemoteProcess:
         rc: contains script return code
         stderr: contains script errors
         stdout: contains script output
+        user: user on behalf whom command will be executed
+        proc_id: process string identificator

         Args:
             shell: Shell instance
@@ -48,16 +62,32 @@ class RemoteProcess:
         Returns:
             RemoteProcess instance for further examination
         """
+        if proc_id is None:
+            proc_id = f"{uuid.uuid4()}"
+
+        cmd_inspector = SuInspector(user) if user else None
         remote_process = cls(
-            cmd=command, process_dir=os.path.join(working_dir, f"proc_{uuid.uuid4()}"), shell=shell
+            cmd=command,
+            process_dir=os.path.join(working_dir, f"proc_{proc_id}"),
+            shell=shell,
+            cmd_inspector=cmd_inspector,
+            proc_id=proc_id,
         )
-        remote_process._create_process_dir()
-        remote_process._generate_command_script(command)
-        remote_process._start_process()
-        remote_process.pid = remote_process._get_pid()
         return remote_process

-    @reporter.step_deco("Get process stdout")
+    @reporter.step("Start remote process")
+    def start(self):
+        """
+        Starts a process on a remote host.
+        """
+
+        self._create_process_dir()
+        self._generate_command_script()
+        self._start_process()
+        self.pid = self._get_pid()
+
+    @reporter.step("Get process stdout")
     def stdout(self, full: bool = False) -> str:
         """
         Method to get process stdout, either fresh info or full.
@@ -73,7 +103,8 @@ class RemoteProcess:
             cur_stdout = self.saved_stdout
         else:
             terminal = self.shell.exec(
-                f"cat {self.process_dir}/stdout", options=CommandOptions(no_log=True)
+                f"cat {self.process_dir}/stdout",
+                options=CommandOptions(no_log=True, extra_inspectors=self.cmd_inspectors),
             )
             if self.proc_rc is not None:
                 self.saved_stdout = terminal.stdout
@@ -88,7 +119,7 @@ class RemoteProcess:
             return resulted_stdout
         return ""

-    @reporter.step_deco("Get process stderr")
+    @reporter.step("Get process stderr")
     def stderr(self, full: bool = False) -> str:
         """
         Method to get process stderr, either fresh info or full.
@@ -104,7 +135,8 @@ class RemoteProcess:
             cur_stderr = self.saved_stderr
         else:
             terminal = self.shell.exec(
-                f"cat {self.process_dir}/stderr", options=CommandOptions(no_log=True)
+                f"cat {self.process_dir}/stderr",
+                options=CommandOptions(no_log=True, extra_inspectors=self.cmd_inspectors),
             )
             if self.proc_rc is not None:
                 self.saved_stderr = terminal.stdout
@@ -118,84 +150,131 @@ class RemoteProcess:
             return resulted_stderr
         return ""

-    @reporter.step_deco("Get process rc")
+    @reporter.step("Get process rc")
     def rc(self) -> Optional[int]:
         if self.proc_rc is not None:
             return self.proc_rc

-        terminal = self.shell.exec(f"cat {self.process_dir}/rc", CommandOptions(check=False))
+        result = self._cat_proc_file("rc")
+        if not result:
+            return None
+
+        self.proc_rc = int(result)
+        return self.proc_rc
+
+    @reporter.step("Get process start time")
+    def start_time(self) -> Optional[int]:
+        if self.proc_start_time is not None:
+            return self.proc_start_time
+
+        result = self._cat_proc_file("start_time")
+        if not result:
+            return None
+
+        self.proc_start_time = int(result)
+        return self.proc_start_time
+
+    @reporter.step("Get process end time")
+    def end_time(self) -> Optional[int]:
+        if self.proc_end_time is not None:
+            return self.proc_end_time
+
+        result = self._cat_proc_file("end_time")
+        if not result:
+            return None
+
+        self.proc_end_time = int(result)
+        return self.proc_end_time
+
+    def _cat_proc_file(self, file: str) -> Optional[str]:
+        terminal = self.shell.exec(
+            f"cat {self.process_dir}/{file}",
+            CommandOptions(check=False, extra_inspectors=self.cmd_inspectors, no_log=True),
+        )
         if "No such file or directory" in terminal.stderr:
             return None
         elif terminal.stderr or terminal.return_code != 0:
-            raise AssertionError(f"cat process rc was not successful: {terminal.stderr}")
+            raise AssertionError(f"cat process {file} was not successful: {terminal.stderr}")

-        self.proc_rc = int(terminal.stdout)
-        return self.proc_rc
+        return terminal.stdout

-    @reporter.step_deco("Check if process is running")
+    @reporter.step("Check if process is running")
     def running(self) -> bool:
         return self.rc() is None

-    @reporter.step_deco("Send signal to process")
+    @reporter.step("Send signal to process")
     def send_signal(self, signal: int) -> None:
-        kill_res = self.shell.exec(f"kill -{signal} {self.pid}", CommandOptions(check=False))
+        kill_res = self.shell.exec(
+            f"kill -{signal} {self.pid}",
+            CommandOptions(check=False, extra_inspectors=self.cmd_inspectors),
+        )
         if "No such process" in kill_res.stderr:
             return
         if kill_res.return_code:
-            raise AssertionError(
-                f"Signal {signal} not sent. Return code of kill: {kill_res.return_code}"
-            )
+            raise AssertionError(f"Signal {signal} not sent. Return code of kill: {kill_res.return_code}")

-    @reporter.step_deco("Stop process")
+    @reporter.step("Stop process")
     def stop(self) -> None:
         self.send_signal(15)

-    @reporter.step_deco("Kill process")
+    @reporter.step("Kill process")
     def kill(self) -> None:
         self.send_signal(9)

-    @reporter.step_deco("Clear process directory")
+    @reporter.step("Clear process directory")
     def clear(self) -> None:
         if self.process_dir == "/":
             raise AssertionError(f"Invalid path to delete: {self.process_dir}")
-        self.shell.exec(f"rm -rf {self.process_dir}")
+        self.shell.exec(f"rm -rf {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors))

-    @reporter.step_deco("Start remote process")
+    @reporter.step("Start remote process")
     def _start_process(self) -> None:
         self.shell.exec(
             f"nohup {self.process_dir}/command.sh </dev/null "
             f">{self.process_dir}/stdout "
-            f"2>{self.process_dir}/stderr &"
+            f"2>{self.process_dir}/stderr &",
+            CommandOptions(extra_inspectors=self.cmd_inspectors),
         )

-    @reporter.step_deco("Create process directory")
+    @reporter.step("Create process directory")
     def _create_process_dir(self) -> None:
-        self.shell.exec(f"mkdir {self.process_dir}")
-        self.shell.exec(f"chmod 777 {self.process_dir}")
-        terminal = self.shell.exec(f"realpath {self.process_dir}")
+        self.shell.exec(f"mkdir -p {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors))
+        self.shell.exec(f"chmod 777 {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors))
+        terminal = self.shell.exec(f"realpath {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors))
         self.process_dir = terminal.stdout.strip()

-    @reporter.step_deco("Get pid")
+    @reporter.step("Get pid")
     @retry(wait=wait_fixed(10), stop=stop_after_attempt(5), reraise=True)
     def _get_pid(self) -> str:
-        terminal = self.shell.exec(f"cat {self.process_dir}/pid")
+        terminal = self.shell.exec(f"cat {self.process_dir}/pid", CommandOptions(extra_inspectors=self.cmd_inspectors))
         assert terminal.stdout, f"invalid pid: {terminal.stdout}"
         return terminal.stdout.strip()

-    @reporter.step_deco("Generate command script")
-    def _generate_command_script(self, command: str) -> None:
-        command = command.replace('"', '\\"').replace("\\", "\\\\")
+    @reporter.step("Generate command script")
+    def _generate_command_script(self) -> None:
+        command = self.cmd.replace('"', '\\"').replace("\\", "\\\\")
         script = (
             f"#!/bin/bash\n"
             f"cd {self.process_dir}\n"
+            f"date +%s > {self.process_dir}/start_time\n"
             f"{command} &\n"
             f"pid=\$!\n"
             f"cd {self.process_dir}\n"
             f"echo \$pid > {self.process_dir}/pid\n"
             f"wait \$pid\n"
-            f"echo $? > {self.process_dir}/rc"
+            f"echo $? > {self.process_dir}/rc\n"
+            f"date +%s > {self.process_dir}/end_time\n"
         )

-        self.shell.exec(f'echo "{script}" > {self.process_dir}/command.sh')
-        self.shell.exec(f"cat {self.process_dir}/command.sh")
-        self.shell.exec(f"chmod +x {self.process_dir}/command.sh")
+        self.shell.exec(
+            f'echo "{script}" > {self.process_dir}/command.sh',
+            CommandOptions(extra_inspectors=self.cmd_inspectors),
+        )
+        self.shell.exec(
+            f"cat {self.process_dir}/command.sh",
+            CommandOptions(extra_inspectors=self.cmd_inspectors),
+        )
+        self.shell.exec(
+            f"chmod +x {self.process_dir}/command.sh",
+            CommandOptions(extra_inspectors=self.cmd_inspectors),
+        )
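
A hedged usage sketch for the reworked API: `create()` no longer launches anything by itself, so callers invoke `start()` explicitly and can read the new timing files afterwards (`shell` stands for any existing `Shell` instance; `time` is assumed to be imported):

    proc = RemoteProcess.create("sleep 3; echo done", shell, user="frostfs-storage", proc_id="demo")
    proc.start()                    # creates the proc dir, writes command.sh, launches it via nohup

    while proc.running():           # running() stays True until the rc file appears
        time.sleep(1)

    elapsed = proc.end_time() - proc.start_time()   # unix seconds from start_time/end_time files
    print(proc.rc(), proc.stdout(full=True), elapsed)
    proc.clear()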
@@ -1,6 +1,9 @@
+from typing import Any
+
 from frostfs_testlib.reporter.allure_handler import AllureHandler
 from frostfs_testlib.reporter.interfaces import ReporterHandler
 from frostfs_testlib.reporter.reporter import Reporter
+from frostfs_testlib.reporter.steps_logger import StepsLogger

 __reporter = Reporter()

@@ -15,3 +18,11 @@ def get_reporter() -> Reporter:
         Singleton reporter instance.
     """
     return __reporter
+
+
+def step(title: str):
+    return __reporter.step(title)
+
+
+def attach(content: Any, file_name: str):
+    return __reporter.attach(content, file_name)
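
With the new module-level `step` and `attach`, callers no longer need to fetch the singleton via `get_reporter()` first; a brief sketch:

    from frostfs_testlib import reporter

    with reporter.step("Create bucket"):                       # context-manager form
        reporter.attach("response body", "create_bucket.txt")  # attached to the current report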
@@ -1,5 +1,5 @@
 import os
-from contextlib import AbstractContextManager
+from contextlib import AbstractContextManager, ContextDecorator
 from textwrap import shorten
 from typing import Any, Callable

@@ -12,8 +12,8 @@ from frostfs_testlib.reporter.interfaces import ReporterHandler
 class AllureHandler(ReporterHandler):
     """Handler that stores test artifacts in Allure report."""

-    def step(self, name: str) -> AbstractContextManager:
-        name = shorten(name, width=70, placeholder="...")
+    def step(self, name: str) -> AbstractContextManager | ContextDecorator:
+        name = shorten(name, width=140, placeholder="...")
         return allure.step(name)

     def step_decorator(self, name: str) -> Callable:
@@ -21,9 +21,14 @@ class AllureHandler(ReporterHandler):

     def attach(self, body: Any, file_name: str) -> None:
         attachment_name, extension = os.path.splitext(file_name)
+        if extension.startswith("."):
+            extension = extension[1:]
         attachment_type = self._resolve_attachment_type(extension)

-        allure.attach(body, attachment_name, attachment_type, extension)
+        if os.path.exists(body):
+            allure.attach.file(body, file_name, attachment_type, extension)
+        else:
+            allure.attach(body, attachment_name, attachment_type, extension)

     def _resolve_attachment_type(self, extension: str) -> attachment_type:
         """Try to find matching Allure attachment type by extension.
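
Since `attach` now checks whether `body` is an existing path, one call covers both inline strings and files on disk; a hedged sketch of the two branches:

    handler = AllureHandler()
    handler.attach("inline text content", "log.txt")   # no such path -> allure.attach(...)
    handler.attach("/tmp/report.json", "report.json")  # existing file -> allure.attach.file(...)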
@@ -1,5 +1,5 @@
 from abc import ABC, abstractmethod
-from contextlib import AbstractContextManager
+from contextlib import AbstractContextManager, ContextDecorator
 from typing import Any, Callable


@@ -7,7 +7,7 @@ class ReporterHandler(ABC):
     """Interface of handler that stores test artifacts in some reporting tool."""

     @abstractmethod
-    def step(self, name: str) -> AbstractContextManager:
+    def step(self, name: str) -> AbstractContextManager | ContextDecorator:
         """Register a new step in test execution.

         Args:
@@ -5,6 +5,7 @@ from typing import Any, Callable, Optional

 from frostfs_testlib.plugins import load_plugin
 from frostfs_testlib.reporter.interfaces import ReporterHandler
+from frostfs_testlib.utils.func_utils import format_by_args


 @contextmanager
@@ -63,7 +64,8 @@ class Reporter:
         def wrapper(*a, **kw):
             resulting_func = func
             for handler in self.handlers:
-                decorator = handler.step_decorator(name)
+                parsed_name = format_by_args(func, name, *a, **kw)
+                decorator = handler.step_decorator(parsed_name)
                 resulting_func = decorator(resulting_func)

             return resulting_func(*a, **kw)
@@ -81,11 +83,11 @@ class Reporter:
         Returns:
             Step context.
         """
-        if not self.handlers:
-            return _empty_step()
-
         step_contexts = [handler.step(name) for handler in self.handlers]
-        return AggregateContextManager(step_contexts)
+        if not step_contexts:
+            step_contexts = [_empty_step()]
+        decorated_wrapper = self.step_deco(name)
+        return AggregateContextManager(step_contexts, decorated_wrapper)

     def attach(self, content: Any, file_name: str) -> None:
         """Attach specified content with given file name to the test report.
@@ -104,9 +106,10 @@ class AggregateContextManager(AbstractContextManager):

     contexts: list[AbstractContextManager]

-    def __init__(self, contexts: list[AbstractContextManager]) -> None:
+    def __init__(self, contexts: list[AbstractContextManager], decorated_wrapper: Callable) -> None:
         super().__init__()
         self.contexts = contexts
+        self.wrapper = decorated_wrapper

     def __enter__(self):
         for context in self.contexts:
@@ -127,3 +130,6 @@ class AggregateContextManager(AbstractContextManager):
         # If all context agreed to suppress exception, then suppress it;
         # otherwise return None to reraise
         return True if all(suppress_decisions) else None
+
+    def __call__(self, *args: Any, **kwds: Any) -> Any:
+        return self.wrapper(*args, **kwds)
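
With `format_by_args` in play, a step title acts as a template over the decorated function's arguments, and the callable `AggregateContextManager` lets the same `reporter.step` serve as both decorator and context manager; a sketch:

    @reporter.step("Prepare node {cluster_node}")   # placeholder resolved per call
    def prepare_node(cluster_node: str) -> None:
        ...

    prepare_node("node-1")   # reported as step "Prepare node node-1"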
src/frostfs_testlib/reporter/steps_logger.py (new file, 56 lines)
@@ -0,0 +1,56 @@
+import logging
+import threading
+from contextlib import AbstractContextManager, ContextDecorator
+from functools import wraps
+from types import TracebackType
+from typing import Any, Callable
+
+from frostfs_testlib.reporter.interfaces import ReporterHandler
+
+
+class StepsLogger(ReporterHandler):
+    """Handler that prints steps to log."""
+
+    def step(self, name: str) -> AbstractContextManager | ContextDecorator:
+        return StepLoggerContext(name)
+
+    def step_decorator(self, name: str) -> Callable:
+        return StepLoggerContext(name)
+
+    def attach(self, body: Any, file_name: str) -> None:
+        pass
+
+
+class StepLoggerContext(AbstractContextManager):
+    INDENT = {}
+
+    def __init__(self, title: str):
+        self.title = title
+        self.logger = logging.getLogger("NeoLogger")
+        self.thread = threading.get_ident()
+        if self.thread not in StepLoggerContext.INDENT:
+            StepLoggerContext.INDENT[self.thread] = 1
+
+    def __enter__(self) -> Any:
+        indent = ">" * StepLoggerContext.INDENT[self.thread]
+        self.logger.info(f"[{self.thread}] {indent} {self.title}")
+        StepLoggerContext.INDENT[self.thread] += 1
+
+    def __exit__(
+        self,
+        __exc_type: type[BaseException] | None,
+        __exc_value: BaseException | None,
+        __traceback: TracebackType | None,
+    ) -> bool | None:
+
+        StepLoggerContext.INDENT[self.thread] -= 1
+        indent = "<" * StepLoggerContext.INDENT[self.thread]
+        self.logger.info(f"[{self.thread}] {indent} {self.title}")
+
+    def __call__(self, func):
+        @wraps(func)
+        def impl(*a, **kw):
+            with self:
+                return func(*a, **kw)
+
+        return impl
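
Because `INDENT` is tracked per thread, nested steps render as an arrow trail in the log; roughly (illustrative output, thread id shortened):

    [140223] > Outer step
    [140223] >> Inner step
    [140223] << Inner step
    [140223] < Outer step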
@ -9,4 +9,4 @@ FROSTFS_ADM_EXEC = os.getenv("FROSTFS_ADM_EXEC", "frostfs-adm")
|
||||||
# Config for frostfs-adm utility. Optional if tests are running against devenv
|
# Config for frostfs-adm utility. Optional if tests are running against devenv
|
||||||
FROSTFS_ADM_CONFIG_PATH = os.getenv("FROSTFS_ADM_CONFIG_PATH")
|
FROSTFS_ADM_CONFIG_PATH = os.getenv("FROSTFS_ADM_CONFIG_PATH")
|
||||||
|
|
||||||
CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", None)
|
CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", "100s")
|
||||||
|
|
|
@ -10,8 +10,10 @@ COMPLEX_OBJECT_TAIL_SIZE = os.getenv("COMPLEX_OBJECT_TAIL_SIZE", "1000")
|
||||||
|
|
||||||
SERVICE_MAX_STARTUP_TIME = os.getenv("SERVICE_MAX_STARTUP_TIME", "5m")
|
SERVICE_MAX_STARTUP_TIME = os.getenv("SERVICE_MAX_STARTUP_TIME", "5m")
|
||||||
|
|
||||||
|
STORAGE_USER_NAME = "frostfs-storage"
|
||||||
|
|
||||||
MORPH_TIMEOUT = os.getenv("MORPH_BLOCK_TIME", "8s")
|
MORPH_TIMEOUT = os.getenv("MORPH_BLOCK_TIME", "8s")
|
||||||
MORPH_BLOCK_TIME = os.getenv("MORPH_BLOCK_TIME", "1s")
|
MORPH_BLOCK_TIME = os.getenv("MORPH_BLOCK_TIME", "8s")
|
||||||
FROSTFS_CONTRACT_CACHE_TIMEOUT = os.getenv("FROSTFS_CONTRACT_CACHE_TIMEOUT", "30s")
|
FROSTFS_CONTRACT_CACHE_TIMEOUT = os.getenv("FROSTFS_CONTRACT_CACHE_TIMEOUT", "30s")
|
||||||
|
|
||||||
# Time interval that allows a GC pass on storage node (this includes GC sleep interval
|
# Time interval that allows a GC pass on storage node (this includes GC sleep interval
|
||||||
|
@ -41,6 +43,14 @@ with open(DEFAULT_WALLET_CONFIG, "w") as file:
|
||||||
|
|
||||||
# Number of attempts that S3 clients will attempt per each request (1 means single attempt
|
# Number of attempts that S3 clients will attempt per each request (1 means single attempt
|
||||||
# without any retries)
|
# without any retries)
|
||||||
MAX_REQUEST_ATTEMPTS = 1
|
MAX_REQUEST_ATTEMPTS = 5
|
||||||
RETRY_MODE = "standard"
|
RETRY_MODE = "standard"
|
||||||
CREDENTIALS_CREATE_TIMEOUT = "1m"
|
CREDENTIALS_CREATE_TIMEOUT = "1m"
|
||||||
|
|
||||||
|
|
||||||
|
HOSTING_CONFIG_FILE = os.getenv(
|
||||||
|
"HOSTING_CONFIG_FILE", os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..", ".devenv.hosting.yaml"))
|
||||||
|
)
|
||||||
|
|
||||||
|
MORE_LOG = os.getenv("MORE_LOG", "1")
|
||||||
|
EXPIRATION_EPOCH_ATTRIBUTE = "__SYSTEM__EXPIRATION_EPOCH"
|
||||||
|
|
|
@ -23,6 +23,14 @@ INVALID_RANGE_OVERFLOW = "invalid '{range}' range: uint64 overflow"
|
||||||
INVALID_OFFSET_SPECIFIER = "invalid '{range}' range offset specifier"
|
INVALID_OFFSET_SPECIFIER = "invalid '{range}' range offset specifier"
|
||||||
INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier"
|
INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier"
|
||||||
|
|
||||||
S3_MALFORMED_XML_REQUEST = (
|
S3_BUCKET_DOES_NOT_ALLOW_ACL = "The bucket does not allow ACLs"
|
||||||
"The XML you provided was not well-formed or did not validate against our published schema."
|
S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not validate against our published schema."
|
||||||
)
|
|
||||||
|
RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied"
|
||||||
|
# Errors from node missing reasons if request was forwarded. Commenting for now
|
||||||
|
# RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied"
|
||||||
|
RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request"
|
||||||
|
NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound"
|
||||||
|
# Errors from node missing reasons if request was forwarded. Commenting for now
|
||||||
|
# NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound"
|
||||||
|
NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request"
|
||||||
|
|
|
@ -7,11 +7,14 @@ LOAD_NODE_SSH_USER = os.getenv("LOAD_NODE_SSH_USER", "service")
|
||||||
LOAD_NODE_SSH_PASSWORD = os.getenv("LOAD_NODE_SSH_PASSWORD")
|
LOAD_NODE_SSH_PASSWORD = os.getenv("LOAD_NODE_SSH_PASSWORD")
|
||||||
LOAD_NODE_SSH_PRIVATE_KEY_PATH = os.getenv("LOAD_NODE_SSH_PRIVATE_KEY_PATH")
|
LOAD_NODE_SSH_PRIVATE_KEY_PATH = os.getenv("LOAD_NODE_SSH_PRIVATE_KEY_PATH")
|
||||||
LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE = os.getenv("LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE")
|
LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE = os.getenv("LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE")
|
||||||
BACKGROUND_WRITERS_COUNT = os.getenv("BACKGROUND_WRITERS_COUNT", 4)
|
BACKGROUND_WRITERS_COUNT = os.getenv("BACKGROUND_WRITERS_COUNT", 0)
|
||||||
BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 4)
|
BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 0)
|
||||||
BACKGROUND_DELETERS_COUNT = os.getenv("BACKGROUND_DELETERS_COUNT", 0)
|
BACKGROUND_DELETERS_COUNT = os.getenv("BACKGROUND_DELETERS_COUNT", 0)
|
||||||
BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 600)
|
BACKGROUND_VERIFIERS_COUNT = os.getenv("BACKGROUND_VERIFIERS_COUNT", 0)
|
||||||
|
BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 1800)
|
||||||
BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE = os.getenv("BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE", 32)
|
BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE = os.getenv("BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE", 32)
|
||||||
|
BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME = float(os.getenv("BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME", 0.8))
|
||||||
|
BACKGROUND_LOAD_SETUP_TIMEOUT = os.getenv("BACKGROUND_LOAD_SETUP_TIMEOUT", "5s")
|
||||||
|
|
||||||
# This will decrease load params for some weak environments
|
# This will decrease load params for some weak environments
|
||||||
BACKGROUND_LOAD_VUS_COUNT_DIVISOR = os.getenv("BACKGROUND_LOAD_VUS_COUNT_DIVISOR", 1)
|
BACKGROUND_LOAD_VUS_COUNT_DIVISOR = os.getenv("BACKGROUND_LOAD_VUS_COUNT_DIVISOR", 1)
|
||||||
|
@ -23,8 +26,10 @@ BACKGROUND_LOAD_CONTAINER_PLACEMENT_POLICY = os.getenv(
|
||||||
)
|
)
|
||||||
BACKGROUND_LOAD_S3_LOCATION = os.getenv("BACKGROUND_LOAD_S3_LOCATION", "node-off")
|
BACKGROUND_LOAD_S3_LOCATION = os.getenv("BACKGROUND_LOAD_S3_LOCATION", "node-off")
|
||||||
PRESET_CONTAINERS_COUNT = os.getenv("CONTAINERS_COUNT", "40")
|
PRESET_CONTAINERS_COUNT = os.getenv("CONTAINERS_COUNT", "40")
|
||||||
|
PRESET_CONTAINER_CREATION_RETRY_COUNT = os.getenv("CONTAINER_CREATION_RETRY_COUNT", "20")
|
||||||
# TODO: At lease one object is required due to bug in xk6 (buckets with no objects produce millions exceptions in read)
|
# TODO: At lease one object is required due to bug in xk6 (buckets with no objects produce millions exceptions in read)
|
||||||
PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "10")
|
PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "1")
|
||||||
K6_DIRECTORY = os.getenv("K6_DIRECTORY", "/etc/k6")
|
K6_DIRECTORY = os.getenv("K6_DIRECTORY", "/etc/k6")
|
||||||
K6_TEARDOWN_PERIOD = os.getenv("K6_TEARDOWN_PERIOD", "30")
|
K6_TEARDOWN_PERIOD = os.getenv("K6_TEARDOWN_PERIOD", "30")
|
||||||
|
K6_STOP_SIGNAL_TIMEOUT = int(os.getenv("K6_STOP_SIGNAL_TIMEOUT", 300))
|
||||||
LOAD_CONFIG_YAML_PATH = os.getenv("LOAD_CONFIG_YAML_PATH", "load_config_yaml_file.yml")
|
LOAD_CONFIG_YAML_PATH = os.getenv("LOAD_CONFIG_YAML_PATH", "load_config_yaml_file.yml")
|
||||||
|
|
|
@ -16,11 +16,10 @@ OPTIONAL_NODE_UNDER_LOAD = os.getenv("OPTIONAL_NODE_UNDER_LOAD")
|
||||||
OPTIONAL_FAILOVER_ENABLED = str_to_bool(os.getenv("OPTIONAL_FAILOVER_ENABLED", "true"))
|
OPTIONAL_FAILOVER_ENABLED = str_to_bool(os.getenv("OPTIONAL_FAILOVER_ENABLED", "true"))
|
||||||
|
|
||||||
# Set this to True to disable background load. I.E. node which supposed to be stopped will not be actually stopped.
|
# Set this to True to disable background load. I.E. node which supposed to be stopped will not be actually stopped.
|
||||||
OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool(
|
OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool(os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true"))
|
||||||
os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true")
|
|
||||||
)
|
|
||||||
|
|
||||||
# Set this to False for disable autouse fixture like node healthcheck during developing time.
|
# Set this to False for disable autouse fixture like node healthcheck during developing time.
|
||||||
OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool(
|
OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool(os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true"))
|
||||||
os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true")
|
|
||||||
)
|
# Use cache for fixtures with @cachec_fixture decorator
|
||||||
|
OPTIONAL_CACHE_FIXTURES = str_to_bool(os.getenv("OPTIONAL_CACHE_FIXTURES", "false"))
|
||||||
|
|
src/frostfs_testlib/resources/s3_acl_grants.py (new file, 9 lines)
@@ -0,0 +1,9 @@
+ALL_USERS_GROUP_URI = "http://acs.amazonaws.com/groups/global/AllUsers"
+ALL_USERS_GROUP_WRITE_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "WRITE"}
+ALL_USERS_GROUP_READ_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "READ"}
+CANONICAL_USER_FULL_CONTROL_GRANT = {"Grantee": {"Type": "CanonicalUser"}, "Permission": "FULL_CONTROL"}
+
+# https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl
+PRIVATE_GRANTS = []
+PUBLIC_READ_GRANTS = [ALL_USERS_GROUP_READ_GRANT]
+PUBLIC_READ_WRITE_GRANTS = [ALL_USERS_GROUP_WRITE_GRANT, ALL_USERS_GROUP_READ_GRANT]
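
These constants let tests compare a bucket's actual grants with the canned-ACL expectation; a hedged sketch (assumes the S3 wrapper exposes the usual `get_bucket_acl` returning a grant list):

    grants = s3_client.get_bucket_acl(bucket)   # assumed wrapper call
    assert grants == PUBLIC_READ_GRANTS         # bucket created with canned ACL "public-read"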
|
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
16
src/frostfs_testlib/s3/curl_bucket_resolver.py
Normal file
16
src/frostfs_testlib/s3/curl_bucket_resolver.py
Normal file
|
@ -0,0 +1,16 @@
|
||||||
|
import re
|
||||||
|
|
||||||
|
from frostfs_testlib.cli.generic_cli import GenericCli
|
||||||
|
from frostfs_testlib.s3.interfaces import BucketContainerResolver
|
||||||
|
from frostfs_testlib.storage.cluster import ClusterNode
|
||||||
|
|
||||||
|
|
||||||
|
class CurlBucketContainerResolver(BucketContainerResolver):
|
||||||
|
def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str:
|
||||||
|
curl = GenericCli("curl", node.host)
|
||||||
|
output = curl(f"-I http://127.0.0.1:8084/{bucket_name}")
|
||||||
|
pattern = r"X-Container-Id: (\S+)"
|
||||||
|
cid = re.findall(pattern, output.stdout)
|
||||||
|
if cid:
|
||||||
|
return cid[0]
|
||||||
|
return None
|
|
@ -1,8 +1,11 @@
|
||||||
from abc import ABC, abstractmethod
|
from abc import ABC, abstractmethod
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
from enum import Enum
|
|
||||||
from typing import Literal, Optional, Union
|
from typing import Literal, Optional, Union
|
||||||
|
|
||||||
|
from frostfs_testlib.storage.cluster import ClusterNode
|
||||||
|
from frostfs_testlib.testing.readable import HumanReadableABC, HumanReadableEnum
|
||||||
|
from frostfs_testlib.utils.file_utils import TestFile
|
||||||
|
|
||||||
|
|
||||||
def _make_objs_dict(key_names):
|
def _make_objs_dict(key_names):
|
||||||
objs_list = []
|
objs_list = []
|
||||||
|
@ -13,7 +16,8 @@ def _make_objs_dict(key_names):
|
||||||
return objs_dict
|
return objs_dict
|
||||||
|
|
||||||
|
|
||||||
class VersioningStatus(Enum):
|
class VersioningStatus(HumanReadableEnum):
|
||||||
|
UNDEFINED = None
|
||||||
ENABLED = "Enabled"
|
ENABLED = "Enabled"
|
||||||
SUSPENDED = "Suspended"
|
SUSPENDED = "Suspended"
|
||||||
|
|
||||||
|
@ -29,11 +33,35 @@ ACL_COPY = [
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
class S3ClientWrapper(ABC):
|
class BucketContainerResolver(ABC):
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None:
|
def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str:
|
||||||
|
"""
|
||||||
|
Resolve Container ID from bucket name
|
||||||
|
|
||||||
|
Args:
|
||||||
|
node: node from where we want to resolve
|
||||||
|
bucket_name: name of the bucket
|
||||||
|
**kwargs: any other required params
|
||||||
|
|
||||||
|
Returns: Container ID
|
||||||
|
"""
|
||||||
|
raise NotImplementedError("Call from abstract class")
|
||||||
|
|
||||||
|
|
||||||
|
class S3ClientWrapper(HumanReadableABC):
|
||||||
|
@abstractmethod
|
||||||
|
def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str, region: str) -> None:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def set_endpoint(self, s3gate_endpoint: str):
|
||||||
|
"""Set endpoint"""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def set_iam_endpoint(self, iam_endpoint: str):
|
||||||
|
"""Set iam endpoint"""
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def create_bucket(
|
def create_bucket(
|
||||||
self,
|
self,
|
||||||
|
@ -129,6 +157,10 @@ class S3ClientWrapper(ABC):
|
||||||
def get_bucket_policy(self, bucket: str) -> str:
|
def get_bucket_policy(self, bucket: str) -> str:
|
||||||
"""Returns the policy of a specified bucket."""
|
"""Returns the policy of a specified bucket."""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def delete_bucket_policy(self, bucket: str) -> str:
|
||||||
|
"""Deletes the policy of a specified bucket."""
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def put_bucket_policy(self, bucket: str, policy: dict) -> None:
|
def put_bucket_policy(self, bucket: str, policy: dict) -> None:
|
||||||
"""Applies S3 bucket policy to an S3 bucket."""
|
"""Applies S3 bucket policy to an S3 bucket."""
|
||||||
|
@ -163,7 +195,9 @@ class S3ClientWrapper(ABC):
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
|
def list_objects(
|
||||||
|
self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None
|
||||||
|
) -> Union[dict, list[str]]:
|
||||||
"""Returns some or all (up to 1,000) of the objects in a bucket with each request.
|
"""Returns some or all (up to 1,000) of the objects in a bucket with each request.
|
||||||
You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
|
You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
|
||||||
A 200 OK response can contain valid or invalid XML. Make sure to design your application
|
A 200 OK response can contain valid or invalid XML. Make sure to design your application
|
||||||
|
@ -262,7 +296,7 @@ class S3ClientWrapper(ABC):
|
||||||
version_id: Optional[str] = None,
|
version_id: Optional[str] = None,
|
||||||
object_range: Optional[tuple[int, int]] = None,
|
object_range: Optional[tuple[int, int]] = None,
|
||||||
full_output: bool = False,
|
full_output: bool = False,
|
||||||
) -> Union[dict, str]:
|
) -> dict | TestFile:
|
||||||
"""Retrieves objects from S3."""
|
"""Retrieves objects from S3."""
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
|
@ -290,15 +324,11 @@ class S3ClientWrapper(ABC):
|
||||||
abort a given multipart upload multiple times in order to completely free all storage consumed by all parts."""
|
abort a given multipart upload multiple times in order to completely free all storage consumed by all parts."""
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def upload_part(
|
def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str:
|
||||||
self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str
|
|
||||||
) -> str:
|
|
||||||
"""Uploads a part in a multipart upload."""
|
"""Uploads a part in a multipart upload."""
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def upload_part_copy(
|
def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str:
|
||||||
self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str
|
|
||||||
) -> str:
|
|
||||||
"""Uploads a part by copying data from an existing object as data source."""
|
"""Uploads a part by copying data from an existing object as data source."""
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
|
@@ -342,6 +372,18 @@ class S3ClientWrapper(ABC):
     def delete_object_tagging(self, bucket: str, key: str) -> None:
         """Removes the entire tag set from the specified object."""

+    @abstractmethod
+    def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict:
+        """Adds or updates bucket lifecycle configuration"""
+
+    @abstractmethod
+    def get_bucket_lifecycle_configuration(self, bucket: str) -> dict:
+        """Gets bucket lifecycle configuration"""
+
+    @abstractmethod
+    def delete_bucket_lifecycle(self, bucket: str) -> dict:
+        """Deletes bucket lifecycle"""
+
     @abstractmethod
     def get_object_attributes(
         self,
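The new lifecycle hooks take the rules twice: once as a dict and once pre-serialized, which suggests one backend consumes a Python structure (boto3-style) while another consumes a JSON file (CLI-style). A sketch of the kind of arguments they would presumably receive; the rule values are illustrative only:

```python
import json

# Lifecycle configuration in the AWS API shape: expire everything under tmp/ after a day.
lifecycle_configuration = {
    "Rules": [
        {
            "ID": "expire-tmp-objects",
            "Status": "Enabled",
            "Filter": {"Prefix": "tmp/"},
            "Expiration": {"Days": 1},
        }
    ]
}
# A serialized copy of the same rules, e.g. for a CLI flavor of the call that reads JSON.
dumped_configuration = json.dumps(lifecycle_configuration)
```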
@@ -376,3 +418,194 @@ class S3ClientWrapper(ABC):
         """cp directory TODO: Add proper description"""

     # END OF OBJECT METHODS #
+
+    # IAM METHODS #
+
+    @abstractmethod
+    def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict:
+        """Adds the specified user to the specified group"""
+
+    @abstractmethod
+    def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict:
+        """Attaches the specified managed policy to the specified IAM group"""
+
+    @abstractmethod
+    def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict:
+        """Attaches the specified managed policy to the specified user"""
+
+    @abstractmethod
+    def iam_create_access_key(self, user_name: str) -> dict:
+        """Creates a new AWS secret access key and access key ID for the specified user"""
+
+    @abstractmethod
+    def iam_create_group(self, group_name: str) -> dict:
+        """Creates a new group"""
+
+    @abstractmethod
+    def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict:
+        """Creates a new managed policy for your AWS account"""
+
+    @abstractmethod
+    def iam_create_user(self, user_name: str) -> dict:
+        """Creates a new IAM user for your AWS account"""
+
+    @abstractmethod
+    def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict:
+        """Deletes the access key pair associated with the specified IAM user"""
+
+    @abstractmethod
+    def iam_delete_group(self, group_name: str) -> dict:
+        """Deletes the specified IAM group"""
+
+    @abstractmethod
+    def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict:
+        """Deletes the specified inline policy that is embedded in the specified IAM group"""
+
+    @abstractmethod
+    def iam_delete_policy(self, policy_arn: str) -> dict:
+        """Deletes the specified managed policy"""
+
+    @abstractmethod
+    def iam_delete_user(self, user_name: str) -> dict:
+        """Deletes the specified IAM user"""
+
+    @abstractmethod
+    def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict:
+        """Deletes the specified inline policy that is embedded in the specified IAM user"""
+
+    @abstractmethod
+    def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict:
+        """Removes the specified managed policy from the specified IAM group"""
+
+    @abstractmethod
+    def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict:
+        """Removes the specified managed policy from the specified user"""
+
+    @abstractmethod
+    def iam_get_group(self, group_name: str) -> dict:
+        """Returns a list of IAM users that are in the specified IAM group"""
+
+    @abstractmethod
+    def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict:
+        """Retrieves the specified inline policy document that is embedded in the specified IAM group"""
+
+    @abstractmethod
+    def iam_get_policy(self, policy_arn: str) -> dict:
+        """Retrieves information about the specified managed policy"""
+
+    @abstractmethod
+    def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict:
+        """Retrieves information about the specified version of the specified managed policy"""
+
+    @abstractmethod
+    def iam_get_user(self, user_name: str) -> dict:
+        """Retrieves information about the specified IAM user"""
+
+    @abstractmethod
+    def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict:
+        """Retrieves the specified inline policy document that is embedded in the specified IAM user"""
+
+    @abstractmethod
+    def iam_list_access_keys(self, user_name: str) -> dict:
+        """Returns information about the access key IDs associated with the specified IAM user"""
+
+    @abstractmethod
+    def iam_list_attached_group_policies(self, group_name: str) -> dict:
+        """Lists all managed policies that are attached to the specified IAM group"""
+
+    @abstractmethod
+    def iam_list_attached_user_policies(self, user_name: str) -> dict:
+        """Lists all managed policies that are attached to the specified IAM user"""
+
+    @abstractmethod
+    def iam_list_entities_for_policy(self, policy_arn: str) -> dict:
+        """Lists all IAM users, groups, and roles that the specified managed policy is attached to"""
+
+    @abstractmethod
+    def iam_list_group_policies(self, group_name: str) -> dict:
+        """Lists the names of the inline policies that are embedded in the specified IAM group"""
+
+    @abstractmethod
+    def iam_list_groups(self) -> dict:
+        """Lists the IAM groups"""
+
+    @abstractmethod
+    def iam_list_groups_for_user(self, user_name: str) -> dict:
+        """Lists the IAM groups that the specified IAM user belongs to"""
+
+    @abstractmethod
+    def iam_list_policies(self) -> dict:
+        """Lists all the managed policies that are available in your AWS account"""
+
+    @abstractmethod
+    def iam_list_policy_versions(self, policy_arn: str) -> dict:
+        """Lists information about the versions of the specified managed policy"""
+
+    @abstractmethod
+    def iam_list_user_policies(self, user_name: str) -> dict:
+        """Lists the names of the inline policies embedded in the specified IAM user"""
+
+    @abstractmethod
+    def iam_list_users(self) -> dict:
+        """Lists the IAM users"""
+
+    @abstractmethod
+    def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict:
+        """Adds or updates an inline policy document that is embedded in the specified IAM group"""
+
+    @abstractmethod
+    def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict:
+        """Adds or updates an inline policy document that is embedded in the specified IAM user"""
+
+    @abstractmethod
+    def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict:
+        """Removes the specified user from the specified group"""
+
+    @abstractmethod
+    def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
+        """Updates the name and/or the path of the specified IAM group"""
+
+    @abstractmethod
+    def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
+        """Updates the name and/or the path of the specified IAM user"""
+
+    @abstractmethod
+    def iam_tag_user(self, user_name: str, tags: list) -> dict:
+        """Adds one or more tags to an IAM user"""
+
+    @abstractmethod
+    def iam_list_user_tags(self, user_name: str) -> dict:
+        """List tags of IAM user"""
+
+    @abstractmethod
+    def iam_untag_user(self, user_name: str, tag_keys: list) -> dict:
+        """Removes the specified tags from the user"""
+
+    # MFA methods
+    @abstractmethod
+    def iam_create_virtual_mfa_device(
+        self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None
+    ) -> tuple:
+        """Creates a new virtual MFA device"""
+
+    @abstractmethod
+    def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict:
+        """Deactivates the specified MFA device and removes it from association with the user name"""
+
+    @abstractmethod
+    def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict:
+        """Deletes a virtual MFA device"""
+
+    @abstractmethod
+    def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict:
+        """Enables the specified MFA device and associates it with the specified IAM user"""
+
+    @abstractmethod
+    def iam_list_virtual_mfa_devices(self) -> dict:
+        """Lists the MFA devices for an IAM user"""
+
+    @abstractmethod
+    def sts_get_session_token(
+        self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None
+    ) -> tuple:
+        """Get session token for user"""
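All of these IAM and MFA entries are abstract; concrete clients are expected to implement them against the gateway under test. A hedged sketch of what a boto3-backed implementation of a few of the hooks could look like — the class name and constructor are assumptions, the boto3 calls themselves are standard:

```python
import boto3


class IamClientSketch:
    def __init__(self, access_key_id: str, secret_access_key: str, endpoint_url: str):
        # endpoint_url points at the S3/IAM gateway under test rather than AWS itself
        self.boto3_iam_client = boto3.client(
            "iam",
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key,
            endpoint_url=endpoint_url,
        )

    def iam_create_user(self, user_name: str) -> dict:
        return self.boto3_iam_client.create_user(UserName=user_name)

    def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict:
        return self.boto3_iam_client.add_user_to_group(UserName=user_name, GroupName=group_name)

    def iam_list_users(self) -> dict:
        return self.boto3_iam_client.list_users()
```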
@@ -1,3 +1,3 @@
 from frostfs_testlib.shell.interfaces import CommandOptions, CommandResult, InteractiveInput, Shell
 from frostfs_testlib.shell.local_shell import LocalShell
-from frostfs_testlib.shell.ssh_shell import SSHShell
+from frostfs_testlib.shell.ssh_shell import SshConnectionProvider, SSHShell
@@ -7,7 +7,23 @@ class SudoInspector(CommandInspector):
     If command is already prepended with sudo, then has no effect.
     """

-    def inspect(self, command: str) -> str:
+    def inspect(self, original_command: str, command: str) -> str:
         if not command.startswith("sudo"):
             return f"sudo {command}"
         return command
+
+
+class SuInspector(CommandInspector):
+    """Allows to run command as another user via sudo su call
+
+    If command is already prepended with sudo su, then has no effect.
+    """
+
+    def __init__(self, user: str) -> None:
+        self.user = user
+
+    def inspect(self, original_command: str, command: str) -> str:
+        if not original_command.startswith("sudo su"):
+            cmd = original_command.replace('"', '\\"').replace("\$", "\\\\\\$")
+            return f'sudo su - {self.user} -c "{cmd}"'
+        return original_command
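What the two inspectors do to a command, using only the classes defined in the hunk above (both receive the untransformed text as `original_command`; here the same string is passed for both arguments, as happens for the first inspector in a chain):

```python
sudo_inspector = SudoInspector()
print(sudo_inspector.inspect("ls -la", "ls -la"))
# -> sudo ls -la

su_inspector = SuInspector(user="storage")
print(su_inspector.inspect("systemctl status frostfs-storage", "systemctl status frostfs-storage"))
# -> sudo su - storage -c "systemctl status frostfs-storage"
```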
@@ -22,11 +22,12 @@ class CommandInspector(ABC):
     """Interface of inspector that processes command text before execution."""

     @abstractmethod
-    def inspect(self, command: str) -> str:
+    def inspect(self, original_command: str, command: str) -> str:
         """Transforms command text and returns modified command.

         Args:
             command: Command to transform with this inspector.
+            original_command: Untransformed command to transform with this inspector. Depending on the type of the inspector, it might be required to modify the original command.

         Returns:
             Transformed command text.

@@ -47,6 +48,7 @@ class CommandOptions:
         check: Controls whether to check return code of the command. Set to False to
             ignore non-zero return codes.
         no_log: Do not print output to logger if True.
+        extra_inspectors: Extra command inspectors to process command.
     """

     interactive_inputs: Optional[list[InteractiveInput]] = None

@@ -54,6 +56,7 @@ class CommandOptions:
     timeout: Optional[int] = None
     check: bool = True
     no_log: bool = False
+    extra_inspectors: Optional[list[CommandInspector]] = None

     def __post_init__(self):
         if self.timeout is None:
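The new `extra_inspectors` field makes inspectors per-call: the shell keeps its own configured inspectors and appends these for a single `exec`. A short usage sketch; `shell` is assumed to be an already-configured `LocalShell` or `SSHShell`, and the module path of `SuInspector` is assumed:

```python
from frostfs_testlib.shell.command_inspectors import SuInspector
from frostfs_testlib.shell.interfaces import CommandOptions

options = CommandOptions(extra_inspectors=[SuInspector(user="storage")])
result = shell.exec("whoami", options)  # runs as: sudo su - storage -c "whoami"
```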
@@ -1,16 +1,18 @@
 import logging
 import subprocess
 import tempfile
+from contextlib import nullcontext
 from datetime import datetime
 from typing import IO, Optional

 import pexpect

-from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib import reporter
+from frostfs_testlib.resources.common import MORE_LOG
 from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell

 logger = logging.getLogger("frostfs.testlib.shell")
-reporter = get_reporter()
+step_context = reporter.step if MORE_LOG == "1" else nullcontext


 class LocalShell(Shell):

@@ -24,20 +26,22 @@ class LocalShell(Shell):
         # If no options were provided, use default options
         options = options or CommandOptions()

-        for inspector in self.command_inspectors:
-            command = inspector.inspect(command)
+        original_command = command
+        extra_inspectors = options.extra_inspectors if options.extra_inspectors else []
+        for inspector in [*self.command_inspectors, *extra_inspectors]:
+            command = inspector.inspect(original_command, command)

-        logger.info(f"Executing command: {command}")
+        with step_context(f"Executing command: {command}"):
             if options.interactive_inputs:
                 return self._exec_interactive(command, options)
             return self._exec_non_interactive(command, options)

     def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult:
         start_time = datetime.utcnow()
         log_file = tempfile.TemporaryFile()  # File is reliable cross-platform way to capture output

         try:
-            command_process = pexpect.spawn(command, timeout=options.timeout)
+            command_process = pexpect.spawn(command, timeout=options.timeout, use_poll=True)
         except (pexpect.ExceptionPexpect, OSError) as exc:
             raise RuntimeError(f"Command: {command}") from exc

@@ -59,8 +63,7 @@ class LocalShell(Shell):

         if options.check and result.return_code != 0:
             raise RuntimeError(
-                f"Command: {command}\nreturn code: {result.return_code}\n"
-                f"Output: {result.stdout}"
+                f"Command: {command}\nreturn code: {result.return_code}\n" f"Output: {result.stdout}\n" f"Stderr: {result.stderr}\n"
             )
         return result

@@ -91,11 +94,7 @@ class LocalShell(Shell):
                 stderr="",
                 return_code=exc.returncode,
             )
-            raise RuntimeError(
-                f"Command: {command}\nError:\n"
-                f"return code: {exc.returncode}\n"
-                f"output: {exc.output}"
-            ) from exc
+            raise RuntimeError(f"Command: {command}\nError with retcode: {exc.returncode}\n Output: {exc.output}") from exc
         except OSError as exc:
             raise RuntimeError(f"Command: {command}\nOutput: {exc.strerror}") from exc
         finally:

@@ -129,22 +128,19 @@ class LocalShell(Shell):
         end_time: datetime,
         result: Optional[CommandResult],
     ) -> None:
-        # TODO: increase logging level if return code is non 0, should be warning at least
-        logger.info(
-            f"Command: {command}\n"
-            f"{'Success:' if result and result.return_code == 0 else 'Error:'}\n"
-            f"return code: {result.return_code if result else ''} "
-            f"\nOutput: {result.stdout if result else ''}"
-        )
-
-        if result:
-            elapsed_time = end_time - start_time
-            command_attachment = (
-                f"COMMAND: {command}\n"
-                f"RETCODE: {result.return_code}\n\n"
-                f"STDOUT:\n{result.stdout}\n"
-                f"STDERR:\n{result.stderr}\n"
-                f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}"
-            )
-            with reporter.step(f"COMMAND: {command}"):
-                reporter.attach(command_attachment, "Command execution.txt")
+        if not result:
+            logger.warning(f"Command: {command}\n" f"Error: result is None")
+            return
+
+        status, log_method = ("Success", logger.info) if result.return_code == 0 else ("Error", logger.warning)
+        log_method(f"Command: {command}\n" f"{status} with retcode {result.return_code}\n" f"Output: \n{result.stdout}")
+
+        elapsed_time = end_time - start_time
+        command_attachment = (
+            f"COMMAND: {command}\n"
+            f"RETCODE: {result.return_code}\n\n"
+            f"STDOUT:\n{result.stdout}\n"
+            f"STDERR:\n{result.stderr}\n"
+            f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}"
+        )
+        reporter.attach(command_attachment, "Command execution.txt")
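The `MORE_LOG` gating above relies on `nullcontext` being a no-op context manager, so reporter steps are only produced when the knob is set. The pattern in isolation, with a stand-in for `reporter.step` and an assumption that the `MORE_LOG` resource constant wraps an environment variable of the same name:

```python
import os
from contextlib import contextmanager, nullcontext


@contextmanager
def step(title: str):  # stand-in for reporter.step from the diff
    print(f"step: {title}")
    yield


MORE_LOG = os.getenv("MORE_LOG", "1")  # assumed source of the resource constant
step_context = step if MORE_LOG == "1" else nullcontext

# nullcontext accepts (and ignores) an enter_result argument,
# so both branches can be called with the step title.
with step_context("Executing command: ls"):
    pass  # command execution happens inside an (optional) reporter step
```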
@@ -6,24 +6,111 @@ from functools import lru_cache, wraps
 from time import sleep
 from typing import ClassVar, Optional, Tuple

-from paramiko import (
-    AutoAddPolicy,
-    Channel,
-    ECDSAKey,
-    Ed25519Key,
-    PKey,
-    RSAKey,
-    SSHClient,
-    SSHException,
-    ssh_exception,
-)
+from paramiko import AutoAddPolicy, Channel, ECDSAKey, Ed25519Key, PKey, RSAKey, SSHClient, SSHException, ssh_exception
 from paramiko.ssh_exception import AuthenticationException

-from frostfs_testlib.reporter import get_reporter
-from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell
+from frostfs_testlib import reporter
+from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell, SshCredentials

 logger = logging.getLogger("frostfs.testlib.shell")
-reporter = get_reporter()
+
+
+class SshConnectionProvider:
+    SSH_CONNECTION_ATTEMPTS: ClassVar[int] = 4
+    SSH_ATTEMPTS_INTERVAL: ClassVar[int] = 10
+    CONNECTION_TIMEOUT = 60
+
+    instance = None
+    connections: dict[str, SSHClient] = {}
+    creds: dict[str, SshCredentials] = {}
+
+    def __new__(cls):
+        if not cls.instance:
+            cls.instance = super(SshConnectionProvider, cls).__new__(cls)
+        return cls.instance
+
+    def store_creds(self, host: str, ssh_creds: SshCredentials):
+        self.creds[host] = ssh_creds
+
+    def provide(self, host: str, port: str) -> SSHClient:
+        if host not in self.creds:
+            raise RuntimeError(f"Please add credentials for host {host}")
+
+        if host in self.connections:
+            client = self.connections[host]
+            if client:
+                return client
+
+        creds = self.creds[host]
+        client = self._create_connection(host, port, creds)
+        self.connections[host] = client
+        return client
+
+    def drop(self, host: str):
+        if host in self.connections:
+            client = self.connections.pop(host)
+            client.close()
+
+    def drop_all(self):
+        hosts = list(self.connections.keys())
+        for host in hosts:
+            self.drop(host)
+
+    def _create_connection(
+        self,
+        host: str,
+        port: str,
+        creds: SshCredentials,
+    ) -> SSHClient:
+        for attempt in range(self.SSH_CONNECTION_ATTEMPTS):
+            connection = SSHClient()
+            connection.set_missing_host_key_policy(AutoAddPolicy())
+            try:
+                if creds.ssh_key_path:
+                    logger.info(
+                        f"Trying to connect to host {host} as {creds.ssh_login} using SSH key "
+                        f"{creds.ssh_key_path} (attempt {attempt})"
+                    )
+                    connection.connect(
+                        hostname=host,
+                        port=port,
+                        username=creds.ssh_login,
+                        pkey=_load_private_key(creds.ssh_key_path, creds.ssh_key_passphrase),
+                        timeout=self.CONNECTION_TIMEOUT,
+                    )
+                else:
+                    logger.info(
+                        f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})"
+                    )
+                    connection.connect(
+                        hostname=host,
+                        port=port,
+                        username=creds.ssh_login,
+                        password=creds.ssh_password,
+                        timeout=self.CONNECTION_TIMEOUT,
+                    )
+                return connection
+            except AuthenticationException:
+                connection.close()
+                logger.exception(f"Can't connect to host {host}")
+                raise
+            except (
+                SSHException,
+                ssh_exception.NoValidConnectionsError,
+                AttributeError,
+                socket.timeout,
+                OSError,
+            ) as exc:
+                connection.close()
+                can_retry = attempt + 1 < self.SSH_CONNECTION_ATTEMPTS
+                if can_retry:
+                    logger.warn(
+                        f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. Error: {exc}"
+                    )
+                    sleep(self.SSH_ATTEMPTS_INTERVAL)
+                    continue
+                logger.exception(f"Can't connect to host {host}")
+                raise HostIsNotAvailable(host) from exc


 class HostIsNotAvailable(Exception):

@@ -36,9 +123,7 @@ class HostIsNotAvailable(Exception):

 def log_command(func):
     @wraps(func)
-    def wrapper(
-        shell: "SSHShell", command: str, options: CommandOptions, *args, **kwargs
-    ) -> CommandResult:
+    def wrapper(shell: "SSHShell", command: str, options: CommandOptions, *args, **kwargs) -> CommandResult:
         command_info = command.removeprefix("$ProgressPreference='SilentlyContinue'\n")
         with reporter.step(command_info):
             logger.info(f'Execute command "{command}" on "{shell.host}"')

@@ -91,9 +176,6 @@ class SSHShell(Shell):
     # to allow remote command to flush its output buffer
     DELAY_AFTER_EXIT = 0.2

-    SSH_CONNECTION_ATTEMPTS: ClassVar[int] = 3
-    CONNECTION_TIMEOUT = 90
-
     def __init__(
         self,
         host: str,

@@ -103,31 +185,34 @@ class SSHShell(Shell):
         private_key_passphrase: Optional[str] = None,
         port: str = "22",
         command_inspectors: Optional[list[CommandInspector]] = None,
+        custom_environment: Optional[dict] = None
     ) -> None:
         super().__init__()
+        self.connection_provider = SshConnectionProvider()
+        self.connection_provider.store_creds(
+            host, SshCredentials(login, password, private_key_path, private_key_passphrase)
+        )
         self.host = host
         self.port = port
-        self.login = login
-        self.password = password
-        self.private_key_path = private_key_path
-        self.private_key_passphrase = private_key_passphrase
         self.command_inspectors = command_inspectors or []
-        self.__connection: Optional[SSHClient] = None
+        self.environment = custom_environment

     @property
     def _connection(self):
-        if not self.__connection:
-            self.__connection = self._create_connection()
-        return self.__connection
+        return self.connection_provider.provide(self.host, self.port)

     def drop(self):
-        self._reset_connection()
+        self.connection_provider.drop(self.host)

     def exec(self, command: str, options: Optional[CommandOptions] = None) -> CommandResult:
         options = options or CommandOptions()

-        for inspector in self.command_inspectors:
-            command = inspector.inspect(command)
+        original_command = command
+        extra_inspectors = options.extra_inspectors if options.extra_inspectors else []
+        for inspector in [*self.command_inspectors, *extra_inspectors]:
+            command = inspector.inspect(original_command, command)

         if options.interactive_inputs:
             result = self._exec_interactive(command, options)

@@ -136,15 +221,13 @@ class SSHShell(Shell):

         if options.check and result.return_code != 0:
             raise RuntimeError(
-                f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}"
+                f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n"
             )
         return result

     @log_command
     def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult:
-        stdin, stdout, stderr = self._connection.exec_command(
-            command, timeout=options.timeout, get_pty=True
-        )
+        stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, get_pty=True, environment=self.environment)
         for interactive_input in options.interactive_inputs:
             input = interactive_input.input
             if not input.endswith("\n"):

@@ -171,7 +254,7 @@ class SSHShell(Shell):
     @log_command
     def _exec_non_interactive(self, command: str, options: CommandOptions) -> CommandResult:
         try:
-            stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout)
+            stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, environment=self.environment)

             if options.close_stdin:
                 stdin.close()

@@ -193,7 +276,7 @@ class SSHShell(Shell):
             socket.timeout,
         ) as exc:
             logger.exception(f"Can't execute command {command} on host: {self.host}")
-            self._reset_connection()
+            self.drop()
             raise HostIsNotAvailable(self.host) from exc

     def _read_channels(

@@ -248,57 +331,3 @@ class SSHShell(Shell):
         full_stderr = b"".join(stderr_chunks)

         return (full_stdout.decode(errors="ignore"), full_stderr.decode(errors="ignore"))
-
-    def _create_connection(self, attempts: int = SSH_CONNECTION_ATTEMPTS) -> SSHClient:
-        for attempt in range(attempts):
-            connection = SSHClient()
-            connection.set_missing_host_key_policy(AutoAddPolicy())
-            try:
-                if self.private_key_path:
-                    logger.info(
-                        f"Trying to connect to host {self.host} as {self.login} using SSH key "
-                        f"{self.private_key_path} (attempt {attempt})"
-                    )
-                    connection.connect(
-                        hostname=self.host,
-                        port=self.port,
-                        username=self.login,
-                        pkey=_load_private_key(self.private_key_path, self.private_key_passphrase),
-                        timeout=self.CONNECTION_TIMEOUT,
-                    )
-                else:
-                    logger.info(
-                        f"Trying to connect to host {self.host} as {self.login} using password "
-                        f"(attempt {attempt})"
-                    )
-                    connection.connect(
-                        hostname=self.host,
-                        port=self.port,
-                        username=self.login,
-                        password=self.password,
-                        timeout=self.CONNECTION_TIMEOUT,
-                    )
-                return connection
-            except AuthenticationException:
-                connection.close()
-                logger.exception(f"Can't connect to host {self.host}")
-                raise
-            except (
-                SSHException,
-                ssh_exception.NoValidConnectionsError,
-                AttributeError,
-                socket.timeout,
-                OSError,
-            ) as exc:
-                connection.close()
-                can_retry = attempt + 1 < attempts
-                if can_retry:
-                    logger.warn(f"Can't connect to host {self.host}, will retry. Error: {exc}")
-                    continue
-                logger.exception(f"Can't connect to host {self.host}")
-                raise HostIsNotAvailable(self.host) from exc
-
-    def _reset_connection(self) -> None:
-        if self.__connection:
-            self.__connection.close()
-            self.__connection = None
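The net effect of moving connection handling into the singleton provider is that two `SSHShell` instances for the same host now share one paramiko `SSHClient` instead of dialing twice. A usage sketch under stated assumptions: the host and credentials are placeholders, and the leading `host`/`login`/`password` keyword names of the `SSHShell` constructor are inferred from the `SshCredentials(login, password, ...)` call in the diff rather than shown in it:

```python
from frostfs_testlib.shell import SshConnectionProvider, SSHShell

first = SSHShell(host="10.0.0.5", login="service", password="secret")
second = SSHShell(host="10.0.0.5", login="service", password="secret")

# Both shells resolve their connection through the same singleton:
assert SshConnectionProvider() is first.connection_provider

first.exec("uname -a")   # dials and caches the connection for 10.0.0.5
second.exec("uptime")    # reuses the cached connection

SshConnectionProvider().drop_all()  # close everything at teardown
```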
@@ -8,29 +8,23 @@ from typing import List, Optional, Union
 import base58

+from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsCli
-from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
-from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
+from frostfs_testlib.resources.common import ASSETS_DIR
 from frostfs_testlib.shell import Shell
-from frostfs_testlib.storage.dataclasses.acl import (
-    EACL_LIFETIME,
-    FROSTFS_CONTRACT_CACHE_TIMEOUT,
-    EACLPubKey,
-    EACLRole,
-    EACLRule,
-)
+from frostfs_testlib.storage.dataclasses.acl import EACL_LIFETIME, FROSTFS_CONTRACT_CACHE_TIMEOUT, EACLPubKey, EACLRole, EACLRule
+from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.utils import wallet_utils

-reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")


-@reporter.step_deco("Get extended ACL")
-def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optional[str]:
-    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
+@reporter.step("Get extended ACL")
+def get_eacl(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str) -> Optional[str]:
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
     try:
-        result = cli.container.get_eacl(wallet=wallet_path, rpc_endpoint=endpoint, cid=cid)
+        result = cli.container.get_eacl(rpc_endpoint=endpoint, cid=cid)
     except RuntimeError as exc:
         logger.info("Extended ACL table is not set for this container")
         logger.info(f"Got exception while getting eacl: {exc}")

@@ -40,18 +34,17 @@ def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optional[str]:
     return result.stdout


-@reporter.step_deco("Set extended ACL")
+@reporter.step("Set extended ACL")
 def set_eacl(
-    wallet_path: str,
+    wallet: WalletInfo,
     cid: str,
     eacl_table_path: str,
     shell: Shell,
     endpoint: str,
     session_token: Optional[str] = None,
 ) -> None:
-    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
     cli.container.set_eacl(
-        wallet=wallet_path,
         rpc_endpoint=endpoint,
         cid=cid,
         table=eacl_table_path,

@@ -67,7 +60,7 @@ def _encode_cid_for_eacl(cid: str) -> str:

 def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str:
     table_file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"eacl_table_{str(uuid.uuid4())}.json")
-    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC)
     cli.acl.extended_create(cid=cid, out=table_file_path, rule=rules_list)

     with open(table_file_path, "r") as file:

@@ -78,7 +71,7 @@ def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str:


 def form_bearertoken_file(
-    wif: str,
+    wallet: WalletInfo,
     cid: str,
     eacl_rule_list: List[Union[EACLRule, EACLPubKey]],
     shell: Shell,

@@ -93,7 +86,7 @@ def form_bearertoken_file(
     enc_cid = _encode_cid_for_eacl(cid) if cid else None
     file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))

-    eacl = get_eacl(wif, cid, shell, endpoint)
+    eacl = get_eacl(wallet, cid, shell, endpoint)
     json_eacl = dict()
     if eacl:
         eacl = eacl.replace("eACL: ", "").split("Signature")[0]

@@ -134,7 +127,7 @@ def form_bearertoken_file(
     if sign:
         sign_bearer(
             shell=shell,
-            wallet_path=wif,
+            wallet=wallet,
             eacl_rules_file_from=file_path,
             eacl_rules_file_to=file_path,
             json=True,

@@ -165,27 +158,19 @@ def eacl_rules(access: str, verbs: list, user: str) -> list[str]:
     return rules


-def sign_bearer(
-    shell: Shell, wallet_path: str, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool
-) -> None:
-    frostfscli = FrostfsCli(
-        shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
-    )
-    frostfscli.util.sign_bearer_token(
-        wallet=wallet_path, from_file=eacl_rules_file_from, to_file=eacl_rules_file_to, json=json
-    )
+def sign_bearer(shell: Shell, wallet: WalletInfo, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool) -> None:
+    frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
+    frostfscli.util.sign_bearer_token(eacl_rules_file_from, eacl_rules_file_to, json=json)


-@reporter.step_deco("Wait for eACL cache expired")
+@reporter.step("Wait for eACL cache expired")
 def wait_for_cache_expired():
     sleep(FROSTFS_CONTRACT_CACHE_TIMEOUT)
     return


-@reporter.step_deco("Return bearer token in base64 to caller")
-def bearer_token_base64_from_file(
-    bearer_path: str,
-) -> str:
+@reporter.step("Return bearer token in base64 to caller")
+def bearer_token_base64_from_file(bearer_path: str) -> str:
     with open(bearer_path, "rb") as file:
         signed = file.read()
     return base64.b64encode(signed).decode("utf-8")
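Call sites migrate accordingly: the ACL helpers now take a `WalletInfo` object, which carries its own `config_path`, instead of a raw wallet path plus the shared `DEFAULT_WALLET_CONFIG`. A minimal before/after sketch; `wallet` is assumed to be a `WalletInfo` the test already holds (how it is built is outside this diff):

```python
# before:
#   eacl = get_eacl("/path/to/wallet.json", cid, shell, endpoint)
# after:
eacl = get_eacl(wallet, cid, shell, endpoint)
set_eacl(wallet, cid, eacl_table_path, shell, endpoint)
```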
@ -1,29 +1,30 @@
|
||||||
import json
|
import json
|
||||||
import logging
|
import logging
|
||||||
|
import re
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
from time import sleep
|
from time import sleep
|
||||||
from typing import Optional, Union
|
from typing import Optional, Union
|
||||||
|
|
||||||
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.cli import FrostfsCli
|
from frostfs_testlib.cli import FrostfsCli
|
||||||
from frostfs_testlib.reporter import get_reporter
|
from frostfs_testlib.plugins import load_plugin
|
||||||
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
|
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
|
||||||
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
|
from frostfs_testlib.s3.interfaces import BucketContainerResolver
|
||||||
from frostfs_testlib.shell import Shell
|
from frostfs_testlib.shell import Shell
|
||||||
from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node
|
from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node
|
||||||
from frostfs_testlib.storage.cluster import Cluster
|
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
|
||||||
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
|
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
from frostfs_testlib.utils import json_utils
|
from frostfs_testlib.utils import json_utils
|
||||||
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
|
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
|
||||||
|
|
||||||
reporter = get_reporter()
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class StorageContainerInfo:
|
class StorageContainerInfo:
|
||||||
id: str
|
id: str
|
||||||
wallet_file: WalletInfo
|
wallet: WalletInfo
|
||||||
|
|
||||||
|
|
||||||
class StorageContainer:
|
class StorageContainer:
|
||||||
|
@ -40,13 +41,10 @@ class StorageContainer:
|
||||||
def get_id(self) -> str:
|
def get_id(self) -> str:
|
||||||
return self.storage_container_info.id
|
return self.storage_container_info.id
|
||||||
|
|
||||||
def get_wallet_path(self) -> str:
|
def get_wallet(self) -> str:
|
||||||
return self.storage_container_info.wallet_file.path
|
return self.storage_container_info.wallet
|
||||||
|
|
||||||
def get_wallet_config_path(self) -> str:
|
@reporter.step("Generate new object and put in container")
|
||||||
return self.storage_container_info.wallet_file.config_path
|
|
||||||
|
|
||||||
@reporter.step_deco("Generate new object and put in container")
|
|
||||||
def generate_object(
|
def generate_object(
|
||||||
self,
|
self,
|
||||||
size: int,
|
size: int,
|
||||||
|
@ -59,37 +57,34 @@ class StorageContainer:
|
||||||
file_hash = get_file_hash(file_path)
|
file_hash = get_file_hash(file_path)
|
||||||
|
|
||||||
container_id = self.get_id()
|
container_id = self.get_id()
|
||||||
wallet_path = self.get_wallet_path()
|
wallet = self.get_wallet()
|
||||||
wallet_config = self.get_wallet_config_path()
|
|
||||||
with reporter.step(f"Put object with size {size} to container {container_id}"):
|
with reporter.step(f"Put object with size {size} to container {container_id}"):
|
||||||
if endpoint:
|
if endpoint:
|
||||||
object_id = put_object(
|
object_id = put_object(
|
||||||
wallet=wallet_path,
|
wallet=wallet,
|
||||||
path=file_path,
|
path=file_path,
|
||||||
cid=container_id,
|
cid=container_id,
|
||||||
expire_at=expire_at,
|
expire_at=expire_at,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
endpoint=endpoint,
|
endpoint=endpoint,
|
||||||
bearer=bearer_token,
|
bearer=bearer_token,
|
||||||
wallet_config=wallet_config,
|
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
object_id = put_object_to_random_node(
|
object_id = put_object_to_random_node(
|
||||||
wallet=wallet_path,
|
wallet=wallet,
|
||||||
path=file_path,
|
path=file_path,
|
||||||
cid=container_id,
|
cid=container_id,
|
||||||
expire_at=expire_at,
|
expire_at=expire_at,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
cluster=self.cluster,
|
cluster=self.cluster,
|
||||||
bearer=bearer_token,
|
bearer=bearer_token,
|
||||||
wallet_config=wallet_config,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
storage_object = StorageObjectInfo(
|
storage_object = StorageObjectInfo(
|
||||||
container_id,
|
container_id,
|
||||||
object_id,
|
object_id,
|
||||||
size=size,
|
size=size,
|
||||||
wallet_file_path=wallet_path,
|
wallet=wallet,
|
||||||
file_path=file_path,
|
file_path=file_path,
|
||||||
file_hash=file_hash,
|
file_hash=file_hash,
|
||||||
)
|
)
|
||||||
|
@ -100,18 +95,18 @@ class StorageContainer:
|
||||||
DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
|
DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
|
||||||
SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X"
|
SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X"
|
||||||
REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X"
|
REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X"
|
||||||
|
DEFAULT_EC_PLACEMENT_RULE = "EC 3.1"
|
||||||
|
|
||||||
|
|
||||||
@reporter.step_deco("Create Container")
|
@reporter.step("Create Container")
|
||||||
def create_container(
|
def create_container(
|
||||||
wallet: str,
|
wallet: WalletInfo,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
rule: str = DEFAULT_PLACEMENT_RULE,
|
rule: str = DEFAULT_PLACEMENT_RULE,
|
||||||
basic_acl: str = "",
|
basic_acl: str = "",
|
||||||
attributes: Optional[dict] = None,
|
attributes: Optional[dict] = None,
|
||||||
session_token: str = "",
|
session_token: str = "",
|
||||||
session_wallet: str = "",
|
|
||||||
name: Optional[str] = None,
|
name: Optional[str] = None,
|
||||||
options: Optional[dict] = None,
|
options: Optional[dict] = None,
|
||||||
await_mode: bool = True,
|
await_mode: bool = True,
|
||||||
|
@ -122,7 +117,7 @@ def create_container(
|
||||||
A wrapper for `frostfs-cli container create` call.
|
A wrapper for `frostfs-cli container create` call.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
wallet (str): a wallet on whose behalf a container is created
|
wallet (WalletInfo): a wallet on whose behalf a container is created
|
||||||
rule (optional, str): placement rule for container
|
rule (optional, str): placement rule for container
|
||||||
basic_acl (optional, str): an ACL for container, will be
|
basic_acl (optional, str): an ACL for container, will be
|
||||||
appended to `--basic-acl` key
|
appended to `--basic-acl` key
|
||||||
|
@ -144,10 +139,9 @@ def create_container(
|
||||||
(str): CID of the created container
|
(str): CID of the created container
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
||||||
result = cli.container.create(
|
result = cli.container.create(
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
wallet=session_wallet if session_wallet else wallet,
|
|
||||||
policy=rule,
|
policy=rule,
|
||||||
basic_acl=basic_acl,
|
basic_acl=basic_acl,
|
||||||
attributes=attributes,
|
attributes=attributes,
|
||||||
|
@ -168,23 +162,17 @@ def create_container(
|
||||||
return cid
|
return cid
|
||||||
|
|
||||||
|
|
||||||
def wait_for_container_creation(
|
def wait_for_container_creation(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1):
|
||||||
wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1
|
|
||||||
):
|
|
||||||
for _ in range(attempts):
|
for _ in range(attempts):
|
||||||
containers = list_containers(wallet, shell, endpoint)
|
containers = list_containers(wallet, shell, endpoint)
|
||||||
if cid in containers:
|
if cid in containers:
|
||||||
return
|
return
|
||||||
logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue")
|
logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue")
|
||||||
sleep(sleep_interval)
|
sleep(sleep_interval)
|
||||||
raise RuntimeError(
|
raise RuntimeError(f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting")
|
||||||
f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def wait_for_container_deletion(
|
def wait_for_container_deletion(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1):
|
||||||
wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1
|
|
||||||
):
|
|
||||||
for _ in range(attempts):
|
for _ in range(attempts):
|
||||||
try:
|
try:
|
||||||
get_container(wallet, cid, shell=shell, endpoint=endpoint)
|
get_container(wallet, cid, shell=shell, endpoint=endpoint)
|
||||||
|
@ -197,30 +185,27 @@ def wait_for_container_deletion(
|
||||||
raise AssertionError(f"Expected container deleted during {attempts * sleep_interval} sec.")
|
raise AssertionError(f"Expected container deleted during {attempts * sleep_interval} sec.")
|
||||||
|
|
||||||
|
|
||||||
@reporter.step_deco("List Containers")
|
@reporter.step("List Containers")
|
||||||
def list_containers(
|
def list_containers(wallet: WalletInfo, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT) -> list[str]:
|
||||||
wallet: str, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT
|
|
||||||
) -> list[str]:
|
|
||||||
"""
|
"""
|
||||||
A wrapper for `frostfs-cli container list` call. It returns all the
|
A wrapper for `frostfs-cli container list` call. It returns all the
|
||||||
available containers for the given wallet.
|
available containers for the given wallet.
|
||||||
Args:
|
Args:
|
||||||
wallet (str): a wallet on whose behalf we list the containers
|
wallet (WalletInfo): a wallet on whose behalf we list the containers
|
||||||
shell: executor for cli command
|
shell: executor for cli command
|
||||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||||
timeout: Timeout for the operation.
|
timeout: Timeout for the operation.
|
||||||
Returns:
|
Returns:
|
||||||
(list): list of containers
|
(list): list of containers
|
||||||
"""
|
"""
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
||||||
result = cli.container.list(rpc_endpoint=endpoint, wallet=wallet, timeout=timeout)
|
result = cli.container.list(rpc_endpoint=endpoint, timeout=timeout)
|
||||||
logger.info(f"Containers: \n{result}")
|
|
||||||
return result.stdout.split()
|
return result.stdout.split()
|
||||||
|
|
||||||
|
|
||||||
@reporter.step_deco("List Objects in container")
|
@reporter.step("List Objects in container")
|
||||||
def list_objects(
|
def list_objects(
|
||||||
wallet: str,
|
wallet: WalletInfo,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
container_id: str,
|
container_id: str,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
|
@ -230,7 +215,7 @@ def list_objects(
|
||||||
A wrapper for `frostfs-cli container list-objects` call. It returns all the
|
A wrapper for `frostfs-cli container list-objects` call. It returns all the
|
||||||
available objects in container.
|
available objects in container.
|
||||||
Args:
|
Args:
|
||||||
wallet (str): a wallet on whose behalf we list the containers objects
|
wallet (WalletInfo): a wallet on whose behalf we list the containers objects
|
||||||
shell: executor for cli command
|
shell: executor for cli command
|
||||||
container_id: cid of container
|
container_id: cid of container
|
||||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||||
|
@ -238,17 +223,15 @@ def list_objects(
|
||||||
Returns:
|
Returns:
|
||||||
(list): list of containers
|
(list): list of containers
|
||||||
"""
|
"""
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
||||||
result = cli.container.list_objects(
|
result = cli.container.list_objects(rpc_endpoint=endpoint, cid=container_id, timeout=timeout)
|
||||||
rpc_endpoint=endpoint, wallet=wallet, cid=container_id, timeout=timeout
|
|
||||||
)
|
|
||||||
logger.info(f"Container objects: \n{result}")
|
logger.info(f"Container objects: \n{result}")
|
||||||
     return result.stdout.split()


-@reporter.step_deco("Get Container")
+@reporter.step("Get Container")
 def get_container(
-    wallet: str,
+    wallet: WalletInfo,
     cid: str,
     shell: Shell,
     endpoint: str,
@@ -259,7 +242,7 @@ def get_container(
     A wrapper for `frostfs-cli container get` call. It extracts container's
     attributes and rearranges them into a more compact view.
     Args:
-        wallet (str): path to a wallet on whose behalf we get the container
+        wallet (WalletInfo): path to a wallet on whose behalf we get the container
         cid (str): ID of the container to get
         shell: executor for cli command
         endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
@@ -269,10 +252,8 @@ def get_container(
         (dict, str): dict of container attributes
     """

-    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
-    result = cli.container.get(
-        rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode, timeout=timeout
-    )
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
+    result = cli.container.get(rpc_endpoint=endpoint, cid=cid, json_mode=json_mode, timeout=timeout)

     if not json_mode:
         return result.stdout
@@ -286,40 +267,37 @@ def get_container(
     return container_info


-@reporter.step_deco("Delete Container")
+@reporter.step("Delete Container")
 # TODO: make the error message about a non-found container more user-friendly
 def delete_container(
-    wallet: str,
+    wallet: WalletInfo,
     cid: str,
     shell: Shell,
     endpoint: str,
     force: bool = False,
     session_token: Optional[str] = None,
     await_mode: bool = False,
-    timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
 ) -> None:
     """
     A wrapper for `frostfs-cli container delete` call.
     Args:
-        wallet (str): path to a wallet on whose behalf we delete the container
+        await_mode: Block execution until container is removed.
+        wallet (WalletInfo): path to a wallet on whose behalf we delete the container
         cid (str): ID of the container to delete
         shell: executor for cli command
         endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
         force (bool): do not check whether container contains locks and remove immediately
         session_token: a path to session token file
-        timeout: Timeout for the operation.
     This function doesn't return anything.
     """

-    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
     cli.container.delete(
-        wallet=wallet,
         cid=cid,
         rpc_endpoint=endpoint,
        force=force,
         session=session_token,
         await_mode=await_mode,
-        timeout=timeout,
     )

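The recurring change in these wrappers is that the wallet config path now travels inside the WalletInfo object instead of coming from the module-level DEFAULT_WALLET_CONFIG, so the explicit `wallet=` kwarg disappears from the CLI calls. A minimal sketch of the shape of that change, using a stand-in dataclass rather than the real testlib class (file names are invented):

from dataclasses import dataclass


@dataclass
class WalletInfo:
    # Stand-in for frostfs_testlib.storage.dataclasses.wallet.WalletInfo:
    # the wallet file and its CLI config travel together as one object.
    path: str
    config_path: str


def build_cli_config(wallet: WalletInfo) -> str:
    # Before: FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
    # After:  FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
    return wallet.config_path


wallet = WalletInfo(path="wallets/user.json", config_path="wallets/user_config.yml")
assert build_cli_config(wallet) == "wallets/user_config.yml"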
@@ -349,11 +327,23 @@ def _parse_cid(output: str) -> str:
     return splitted[1]


-@reporter.step_deco("Search container by name")
-def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str):
-    list_cids = list_containers(wallet, shell, endpoint)
-    for cid in list_cids:
-        cont_info = get_container(wallet, cid, shell, endpoint, True)
-        if cont_info.get("attributes", {}).get("Name", None) == name:
-            return cid
-    return None
+@reporter.step("Search for nodes with a container")
+def search_nodes_with_container(
+    wallet: WalletInfo,
+    cid: str,
+    shell: Shell,
+    endpoint: str,
+    cluster: Cluster,
+    timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+) -> list[ClusterNode]:
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
+    result = cli.container.search_node(rpc_endpoint=endpoint, cid=cid, timeout=timeout)
+
+    pattern = r"[0-9]+(?:\.[0-9]+){3}"
+    nodes_ip = list(set(re.findall(pattern, result.stdout)))
+
+    with reporter.step(f"nodes ips = {nodes_ip}"):
+        nodes_list = cluster.get_nodes_by_ip(nodes_ip)
+
+    with reporter.step(f"Return nodes - {nodes_list}"):
+        return nodes_list

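The replacement helper no longer matches container names locally; it asks the CLI which nodes host the container and scrapes IPv4 addresses out of the command output with a regex. The pattern and the dedup-via-set step can be tried in isolation (the sample output below is made up):

import re

# Four dot-separated number groups, the same pattern the new helper uses.
pattern = r"[0-9]+(?:\.[0-9]+){3}"
stdout = "192.168.1.10:8080\n192.168.1.11:8080\n192.168.1.10:8081\n"

# set() drops the duplicate produced by the two ports on .10, which is
# also why the resulting list order is not stable across runs.
nodes_ip = list(set(re.findall(pattern, stdout)))
print(sorted(nodes_ip))  # ['192.168.1.10', '192.168.1.11']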
@@ -5,22 +5,25 @@ import re
 import uuid
 from typing import Any, Optional

+from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsCli
 from frostfs_testlib.cli.neogo import NeoGo
-from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE
-from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
+from frostfs_testlib.resources.common import ASSETS_DIR
 from frostfs_testlib.shell import Shell
-from frostfs_testlib.storage.cluster import Cluster
+from frostfs_testlib.storage.cluster import Cluster, ClusterNode
+from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+from frostfs_testlib.testing import wait_for_success
 from frostfs_testlib.utils import json_utils
+from frostfs_testlib.utils.cli_utils import parse_netmap_output
+from frostfs_testlib.utils.file_utils import TestFile

 logger = logging.getLogger("NeoLogger")
-reporter = get_reporter()


-@reporter.step_deco("Get object from random node")
+@reporter.step("Get object from random node")
 def get_object_from_random_node(
-    wallet: str,
+    wallet: WalletInfo,
     cid: str,
     oid: str,
     shell: Shell,
@@ -28,7 +31,6 @@ def get_object_from_random_node(
     bearer: Optional[str] = None,
     write_object: Optional[str] = None,
     xhdr: Optional[dict] = None,
-    wallet_config: Optional[str] = None,
     no_progress: bool = True,
     session: Optional[str] = None,
     timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
@@ -44,7 +46,6 @@ def get_object_from_random_node(
         cluster: cluster object
         bearer (optional, str): path to Bearer Token file, appends to `--bearer` key
         write_object (optional, str): path to downloaded file, appends to `--file` key
-        wallet_config(optional, str): path to the wallet config
         no_progress(optional, bool): do not show progress bar
         xhdr (optional, dict): Request X-Headers in form of Key=Value
         session (optional, dict): path to a JSON-encoded container session token
@@ -62,16 +63,15 @@ def get_object_from_random_node(
         bearer,
         write_object,
         xhdr,
-        wallet_config,
         no_progress,
         session,
         timeout,
     )


-@reporter.step_deco("Get object from {endpoint}")
+@reporter.step("Get object from {endpoint}")
 def get_object(
-    wallet: str,
+    wallet: WalletInfo,
     cid: str,
     oid: str,
     shell: Shell,
@@ -79,23 +79,21 @@ def get_object(
     bearer: Optional[str] = None,
     write_object: Optional[str] = None,
     xhdr: Optional[dict] = None,
-    wallet_config: Optional[str] = None,
     no_progress: bool = True,
     session: Optional[str] = None,
     timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
-) -> str:
+) -> TestFile:
     """
     GET from FrostFS.

     Args:
-        wallet (str): wallet on whose behalf GET is done
+        wallet (WalletInfo): wallet on whose behalf GET is done
         cid (str): ID of Container where we get the Object from
         oid (str): Object ID
         shell: executor for cli command
         bearer: path to Bearer Token file, appends to `--bearer` key
         write_object: path to downloaded file, appends to `--file` key
         endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
-        wallet_config(optional, str): path to the wallet config
         no_progress(optional, bool): do not show progress bar
         xhdr (optional, dict): Request X-Headers in form of Key=Value
         session (optional, dict): path to a JSON-encoded container session token
@@ -106,15 +104,14 @@ def get_object(

     if not write_object:
         write_object = str(uuid.uuid4())
-    file_path = os.path.join(ASSETS_DIR, write_object)
+    test_file = TestFile(os.path.join(ASSETS_DIR, write_object))

-    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
     cli.object.get(
         rpc_endpoint=endpoint,
-        wallet=wallet,
         cid=cid,
         oid=oid,
-        file=file_path,
+        file=test_file,
         bearer=bearer,
         no_progress=no_progress,
         xhdr=xhdr,
@@ -122,19 +119,18 @@ def get_object(
         timeout=timeout,
     )

-    return file_path
+    return test_file

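get_object and the range helpers now hand back a TestFile instead of a bare path string, yet the call sites in this diff keep using it exactly like a path (`open(test_file, "rb")`, `file=test_file`). One way to get that drop-in behaviour is a str subclass; this is an assumed sketch, not the real class from frostfs_testlib.utils.file_utils, which presumably adds cleanup hooks:

import os


class TestFile(str):
    # Assumed sketch: subclassing str keeps every existing consumer working
    # (open(), os.path.*, string formatting) while giving the framework a
    # distinct type it can hang artifact-cleanup logic on.
    pass


test_file = TestFile(os.path.join("/tmp", "assets", "object.bin"))
assert isinstance(test_file, str)  # still a plain path for open()
assert os.path.basename(test_file) == "object.bin"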
@reporter.step_deco("Get Range Hash from {endpoint}")
|
@reporter.step("Get Range Hash from {endpoint}")
|
||||||
def get_range_hash(
|
def get_range_hash(
|
||||||
wallet: str,
|
wallet: WalletInfo,
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
range_cut: str,
|
range_cut: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
bearer: Optional[str] = None,
|
bearer: Optional[str] = None,
|
||||||
wallet_config: Optional[str] = None,
|
|
||||||
xhdr: Optional[dict] = None,
|
xhdr: Optional[dict] = None,
|
||||||
session: Optional[str] = None,
|
session: Optional[str] = None,
|
||||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||||
|
@ -151,17 +147,15 @@ def get_range_hash(
|
||||||
range_cut: Range to take hash from in the form offset1:length1,...,
|
range_cut: Range to take hash from in the form offset1:length1,...,
|
||||||
value to pass to the `--range` parameter
|
value to pass to the `--range` parameter
|
||||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||||
wallet_config: path to the wallet config
|
|
||||||
xhdr: Request X-Headers in form of Key=Values
|
xhdr: Request X-Headers in form of Key=Values
|
||||||
session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session.
|
session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session.
|
||||||
timeout: Timeout for the operation.
|
timeout: Timeout for the operation.
|
||||||
Returns:
|
Returns:
|
||||||
None
|
None
|
||||||
"""
|
"""
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
||||||
result = cli.object.hash(
|
result = cli.object.hash(
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
wallet=wallet,
|
|
||||||
cid=cid,
|
cid=cid,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
range=range_cut,
|
range=range_cut,
|
||||||
|
@ -175,17 +169,17 @@ def get_range_hash(
|
||||||
return result.stdout.split(":")[1].strip()
|
return result.stdout.split(":")[1].strip()
|
||||||
|
|
||||||
|
|
||||||
@reporter.step_deco("Put object to random node")
|
@reporter.step("Put object to random node")
|
||||||
def put_object_to_random_node(
|
def put_object_to_random_node(
|
||||||
wallet: str,
|
wallet: WalletInfo,
|
||||||
path: str,
|
path: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
cluster: Cluster,
|
cluster: Cluster,
|
||||||
bearer: Optional[str] = None,
|
bearer: Optional[str] = None,
|
||||||
|
copies_number: Optional[int] = None,
|
||||||
attributes: Optional[dict] = None,
|
attributes: Optional[dict] = None,
|
||||||
xhdr: Optional[dict] = None,
|
xhdr: Optional[dict] = None,
|
||||||
wallet_config: Optional[str] = None,
|
|
||||||
expire_at: Optional[int] = None,
|
expire_at: Optional[int] = None,
|
||||||
no_progress: bool = True,
|
no_progress: bool = True,
|
||||||
session: Optional[str] = None,
|
session: Optional[str] = None,
|
||||||
|
@ -201,9 +195,9 @@ def put_object_to_random_node(
|
||||||
shell: executor for cli command
|
shell: executor for cli command
|
||||||
cluster: cluster under test
|
cluster: cluster under test
|
||||||
bearer: path to Bearer Token file, appends to `--bearer` key
|
bearer: path to Bearer Token file, appends to `--bearer` key
|
||||||
|
copies_number: Number of copies of the object to store within the RPC call
|
||||||
attributes: User attributes in form of Key1=Value1,Key2=Value2
|
attributes: User attributes in form of Key1=Value1,Key2=Value2
|
||||||
cluster: cluster under test
|
cluster: cluster under test
|
||||||
wallet_config: path to the wallet config
|
|
||||||
no_progress: do not show progress bar
|
no_progress: do not show progress bar
|
||||||
expire_at: Last epoch in the life of the object
|
expire_at: Last epoch in the life of the object
|
||||||
xhdr: Request X-Headers in form of Key=Value
|
xhdr: Request X-Headers in form of Key=Value
|
||||||
|
@ -221,9 +215,9 @@ def put_object_to_random_node(
|
||||||
shell,
|
shell,
|
||||||
endpoint,
|
endpoint,
|
||||||
bearer,
|
bearer,
|
||||||
|
copies_number,
|
||||||
attributes,
|
attributes,
|
||||||
xhdr,
|
xhdr,
|
||||||
wallet_config,
|
|
||||||
expire_at,
|
expire_at,
|
||||||
no_progress,
|
no_progress,
|
||||||
session,
|
session,
|
||||||
|
@ -231,17 +225,17 @@ def put_object_to_random_node(
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@reporter.step_deco("Put object at {endpoint} in container {cid}")
|
@reporter.step("Put object at {endpoint} in container {cid}")
|
||||||
def put_object(
|
def put_object(
|
||||||
wallet: str,
|
wallet: WalletInfo,
|
||||||
path: str,
|
path: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
bearer: Optional[str] = None,
|
bearer: Optional[str] = None,
|
||||||
|
copies_number: Optional[int] = None,
|
||||||
attributes: Optional[dict] = None,
|
attributes: Optional[dict] = None,
|
||||||
xhdr: Optional[dict] = None,
|
xhdr: Optional[dict] = None,
|
||||||
wallet_config: Optional[str] = None,
|
|
||||||
expire_at: Optional[int] = None,
|
expire_at: Optional[int] = None,
|
||||||
no_progress: bool = True,
|
no_progress: bool = True,
|
||||||
session: Optional[str] = None,
|
session: Optional[str] = None,
|
||||||
|
@ -256,9 +250,9 @@ def put_object(
|
||||||
cid: ID of Container where we get the Object from
|
cid: ID of Container where we get the Object from
|
||||||
shell: executor for cli command
|
shell: executor for cli command
|
||||||
bearer: path to Bearer Token file, appends to `--bearer` key
|
bearer: path to Bearer Token file, appends to `--bearer` key
|
||||||
|
copies_number: Number of copies of the object to store within the RPC call
|
||||||
attributes: User attributes in form of Key1=Value1,Key2=Value2
|
attributes: User attributes in form of Key1=Value1,Key2=Value2
|
||||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||||
wallet_config: path to the wallet config
|
|
||||||
no_progress: do not show progress bar
|
no_progress: do not show progress bar
|
||||||
expire_at: Last epoch in the life of the object
|
expire_at: Last epoch in the life of the object
|
||||||
xhdr: Request X-Headers in form of Key=Value
|
xhdr: Request X-Headers in form of Key=Value
|
||||||
|
@ -268,14 +262,14 @@ def put_object(
|
||||||
(str): ID of uploaded Object
|
(str): ID of uploaded Object
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
||||||
result = cli.object.put(
|
result = cli.object.put(
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
wallet=wallet,
|
|
||||||
file=path,
|
file=path,
|
||||||
cid=cid,
|
cid=cid,
|
||||||
attributes=attributes,
|
attributes=attributes,
|
||||||
bearer=bearer,
|
bearer=bearer,
|
||||||
|
copies_number=copies_number,
|
||||||
expire_at=expire_at,
|
expire_at=expire_at,
|
||||||
no_progress=no_progress,
|
no_progress=no_progress,
|
||||||
xhdr=xhdr,
|
xhdr=xhdr,
|
||||||
|
@ -289,15 +283,14 @@ def put_object(
|
||||||
return oid.strip()
|
return oid.strip()
|
||||||
|
|
||||||
|
|
||||||
@reporter.step_deco("Delete object {cid}/{oid} from {endpoint}")
|
@reporter.step("Delete object {cid}/{oid} from {endpoint}")
|
||||||
def delete_object(
|
def delete_object(
|
||||||
wallet: str,
|
wallet: WalletInfo,
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
bearer: str = "",
|
bearer: str = "",
|
||||||
wallet_config: Optional[str] = None,
|
|
||||||
xhdr: Optional[dict] = None,
|
xhdr: Optional[dict] = None,
|
||||||
session: Optional[str] = None,
|
session: Optional[str] = None,
|
||||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||||
|
@ -312,7 +305,6 @@ def delete_object(
|
||||||
shell: executor for cli command
|
shell: executor for cli command
|
||||||
bearer: path to Bearer Token file, appends to `--bearer` key
|
bearer: path to Bearer Token file, appends to `--bearer` key
|
||||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||||
wallet_config: path to the wallet config
|
|
||||||
xhdr: Request X-Headers in form of Key=Value
|
xhdr: Request X-Headers in form of Key=Value
|
||||||
session: path to a JSON-encoded container session token
|
session: path to a JSON-encoded container session token
|
||||||
timeout: Timeout for the operation.
|
timeout: Timeout for the operation.
|
||||||
|
@ -320,10 +312,9 @@ def delete_object(
|
||||||
(str): Tombstone ID
|
(str): Tombstone ID
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
||||||
result = cli.object.delete(
|
result = cli.object.delete(
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
wallet=wallet,
|
|
||||||
cid=cid,
|
cid=cid,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
bearer=bearer,
|
bearer=bearer,
|
||||||
|
@ -337,15 +328,14 @@ def delete_object(
|
||||||
return tombstone.strip()
|
return tombstone.strip()
|
||||||
|
|
||||||
|
|
||||||
@reporter.step_deco("Get Range")
|
@reporter.step("Get Range")
|
||||||
def get_range(
|
def get_range(
|
||||||
wallet: str,
|
wallet: WalletInfo,
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
range_cut: str,
|
range_cut: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
wallet_config: Optional[str] = None,
|
|
||||||
bearer: str = "",
|
bearer: str = "",
|
||||||
xhdr: Optional[dict] = None,
|
xhdr: Optional[dict] = None,
|
||||||
session: Optional[str] = None,
|
session: Optional[str] = None,
|
||||||
|
@ -362,37 +352,35 @@ def get_range(
|
||||||
shell: executor for cli command
|
shell: executor for cli command
|
||||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||||
bearer: path to Bearer Token file, appends to `--bearer` key
|
bearer: path to Bearer Token file, appends to `--bearer` key
|
||||||
wallet_config: path to the wallet config
|
|
||||||
xhdr: Request X-Headers in form of Key=Value
|
xhdr: Request X-Headers in form of Key=Value
|
||||||
session: path to a JSON-encoded container session token
|
session: path to a JSON-encoded container session token
|
||||||
timeout: Timeout for the operation.
|
timeout: Timeout for the operation.
|
||||||
Returns:
|
Returns:
|
||||||
(str, bytes) - path to the file with range content and content of this file as bytes
|
(str, bytes) - path to the file with range content and content of this file as bytes
|
||||||
"""
|
"""
|
||||||
range_file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4()))
|
test_file = TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4())))
|
||||||
|
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
||||||
cli.object.range(
|
cli.object.range(
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
wallet=wallet,
|
|
||||||
cid=cid,
|
cid=cid,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
range=range_cut,
|
range=range_cut,
|
||||||
file=range_file_path,
|
file=test_file,
|
||||||
bearer=bearer,
|
bearer=bearer,
|
||||||
xhdr=xhdr,
|
xhdr=xhdr,
|
||||||
session=session,
|
session=session,
|
||||||
timeout=timeout,
|
timeout=timeout,
|
||||||
)
|
)
|
||||||
|
|
||||||
with open(range_file_path, "rb") as file:
|
with open(test_file, "rb") as file:
|
||||||
content = file.read()
|
content = file.read()
|
||||||
return range_file_path, content
|
return test_file, content
|
||||||
|
|
||||||
|
|
||||||
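The `offset:length` string in range_cut maps straight onto a seek-and-read. A local, cluster-free illustration of the same semantics (pure stdlib, nothing testlib-specific):

import os
import tempfile


def read_range(path: str, range_cut: str) -> bytes:
    # range_cut uses the CLI's `--range` shape: "offset:length".
    offset, length = (int(part) for part in range_cut.split(":"))
    with open(path, "rb") as file:
        file.seek(offset)
        return file.read(length)


with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"0123456789")

print(read_range(tmp.name, "2:4"))  # b'2345'
os.unlink(tmp.name)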
@reporter.step_deco("Lock Object")
|
@reporter.step("Lock Object")
|
||||||
def lock_object(
|
def lock_object(
|
||||||
wallet: str,
|
wallet: WalletInfo,
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
|
@ -402,7 +390,6 @@ def lock_object(
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
bearer: Optional[str] = None,
|
bearer: Optional[str] = None,
|
||||||
session: Optional[str] = None,
|
session: Optional[str] = None,
|
||||||
wallet_config: Optional[str] = None,
|
|
||||||
ttl: Optional[int] = None,
|
ttl: Optional[int] = None,
|
||||||
xhdr: Optional[dict] = None,
|
xhdr: Optional[dict] = None,
|
||||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||||
|
@ -429,13 +416,12 @@ def lock_object(
|
||||||
Lock object ID
|
Lock object ID
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
||||||
result = cli.object.lock(
|
result = cli.object.lock(
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
lifetime=lifetime,
|
lifetime=lifetime,
|
||||||
expire_at=expire_at,
|
expire_at=expire_at,
|
||||||
address=address,
|
address=address,
|
||||||
wallet=wallet,
|
|
||||||
cid=cid,
|
cid=cid,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
bearer=bearer,
|
bearer=bearer,
|
||||||
|
@ -451,16 +437,15 @@ def lock_object(
|
||||||
return oid.strip()
|
return oid.strip()
|
||||||
|
|
||||||
|
|
||||||
@reporter.step_deco("Search object")
|
@reporter.step("Search object")
|
||||||
def search_object(
|
def search_object(
|
||||||
wallet: str,
|
wallet: WalletInfo,
|
||||||
cid: str,
|
cid: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
bearer: str = "",
|
bearer: str = "",
|
||||||
filters: Optional[dict] = None,
|
filters: Optional[dict] = None,
|
||||||
expected_objects_list: Optional[list] = None,
|
expected_objects_list: Optional[list] = None,
|
||||||
wallet_config: Optional[str] = None,
|
|
||||||
xhdr: Optional[dict] = None,
|
xhdr: Optional[dict] = None,
|
||||||
session: Optional[str] = None,
|
session: Optional[str] = None,
|
||||||
phy: bool = False,
|
phy: bool = False,
|
||||||
|
@ -478,7 +463,6 @@ def search_object(
|
||||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||||
filters: key=value pairs to filter Objects
|
filters: key=value pairs to filter Objects
|
||||||
expected_objects_list: a list of ObjectIDs to compare found Objects with
|
expected_objects_list: a list of ObjectIDs to compare found Objects with
|
||||||
wallet_config: path to the wallet config
|
|
||||||
xhdr: Request X-Headers in form of Key=Value
|
xhdr: Request X-Headers in form of Key=Value
|
||||||
session: path to a JSON-encoded container session token
|
session: path to a JSON-encoded container session token
|
||||||
phy: Search physically stored objects.
|
phy: Search physically stored objects.
|
||||||
|
@ -489,16 +473,13 @@ def search_object(
|
||||||
list of found ObjectIDs
|
list of found ObjectIDs
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
||||||
result = cli.object.search(
|
result = cli.object.search(
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
wallet=wallet,
|
|
||||||
cid=cid,
|
cid=cid,
|
||||||
bearer=bearer,
|
bearer=bearer,
|
||||||
xhdr=xhdr,
|
xhdr=xhdr,
|
||||||
filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()]
|
filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None,
|
||||||
if filters
|
|
||||||
else None,
|
|
||||||
session=session,
|
session=session,
|
||||||
phy=phy,
|
phy=phy,
|
||||||
root=root,
|
root=root,
|
||||||
|
@ -509,25 +490,18 @@ def search_object(
|
||||||
|
|
||||||
if expected_objects_list:
|
if expected_objects_list:
|
||||||
if sorted(found_objects) == sorted(expected_objects_list):
|
if sorted(found_objects) == sorted(expected_objects_list):
|
||||||
logger.info(
|
logger.info(f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'")
|
||||||
f"Found objects list '{found_objects}' "
|
|
||||||
f"is equal for expected list '{expected_objects_list}'"
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
logger.warning(
|
logger.warning(f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'")
|
||||||
f"Found object list {found_objects} "
|
|
||||||
f"is not equal to expected list '{expected_objects_list}'"
|
|
||||||
)
|
|
||||||
|
|
||||||
return found_objects
|
return found_objects
|
||||||
|
|
||||||
|
|
||||||
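The collapsed one-liner keeps the same behaviour: a filters dict becomes a list of `KEY EQ VALUE` strings for the CLI, or None when no filters were given so the flag is omitted entirely. The rendering step in isolation (sample keys are invented):

filters = {"FileName": "cat.jpg", "Type": "image"}

# dict -> ["KEY EQ VALUE", ...]; falls back to None when the dict is empty.
rendered = [f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None
print(rendered)  # ['FileName EQ cat.jpg', 'Type EQ image']

assert ([f"{k} EQ {v}" for k, v in {}.items()] if {} else None) is None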
@reporter.step_deco("Get netmap netinfo")
|
@reporter.step("Get netmap netinfo")
|
||||||
def get_netmap_netinfo(
|
def get_netmap_netinfo(
|
||||||
wallet: str,
|
wallet: WalletInfo,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
wallet_config: Optional[str] = None,
|
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
ttl: Optional[int] = None,
|
ttl: Optional[int] = None,
|
||||||
xhdr: Optional[dict] = None,
|
xhdr: Optional[dict] = None,
|
||||||
|
@ -537,7 +511,7 @@ def get_netmap_netinfo(
|
||||||
Get netmap netinfo output from node
|
Get netmap netinfo output from node
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
wallet (str): wallet on whose behalf request is done
|
wallet (WalletInfo): wallet on whose behalf request is done
|
||||||
shell: executor for cli command
|
shell: executor for cli command
|
||||||
endpoint (optional, str): FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
endpoint (optional, str): FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||||
address: Address of wallet account
|
address: Address of wallet account
|
||||||
|
@ -550,9 +524,8 @@ def get_netmap_netinfo(
|
||||||
(dict): dict of parsed command output
|
(dict): dict of parsed command output
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
||||||
output = cli.netmap.netinfo(
|
output = cli.netmap.netinfo(
|
||||||
wallet=wallet,
|
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
address=address,
|
address=address,
|
||||||
ttl=ttl,
|
ttl=ttl,
|
||||||
|
@ -574,9 +547,9 @@ def get_netmap_netinfo(
|
||||||
return settings
|
return settings
|
||||||
|
|
||||||
|
|
||||||
@reporter.step_deco("Head object")
|
@reporter.step("Head object")
|
||||||
def head_object(
|
def head_object(
|
||||||
wallet: str,
|
wallet: WalletInfo,
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
|
@ -586,7 +559,6 @@ def head_object(
|
||||||
json_output: bool = True,
|
json_output: bool = True,
|
||||||
is_raw: bool = False,
|
is_raw: bool = False,
|
||||||
is_direct: bool = False,
|
is_direct: bool = False,
|
||||||
wallet_config: Optional[str] = None,
|
|
||||||
session: Optional[str] = None,
|
session: Optional[str] = None,
|
||||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||||
):
|
):
|
||||||
|
@ -594,7 +566,7 @@ def head_object(
|
||||||
HEAD an Object.
|
HEAD an Object.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
wallet (str): wallet on whose behalf HEAD is done
|
wallet (WalletInfo): wallet on whose behalf HEAD is done
|
||||||
cid (str): ID of Container where we get the Object from
|
cid (str): ID of Container where we get the Object from
|
||||||
oid (str): ObjectID to HEAD
|
oid (str): ObjectID to HEAD
|
||||||
shell: executor for cli command
|
shell: executor for cli command
|
||||||
|
@ -606,7 +578,6 @@ def head_object(
|
||||||
turns into `--raw` key
|
turns into `--raw` key
|
||||||
is_direct(optional, bool): send request directly to the node or not; this flag
|
is_direct(optional, bool): send request directly to the node or not; this flag
|
||||||
turns into `--ttl 1` key
|
turns into `--ttl 1` key
|
||||||
wallet_config(optional, str): path to the wallet config
|
|
||||||
xhdr (optional, dict): Request X-Headers in form of Key=Value
|
xhdr (optional, dict): Request X-Headers in form of Key=Value
|
||||||
session (optional, dict): path to a JSON-encoded container session token
|
session (optional, dict): path to a JSON-encoded container session token
|
||||||
timeout: Timeout for the operation.
|
timeout: Timeout for the operation.
|
||||||
|
@ -617,10 +588,9 @@ def head_object(
|
||||||
(str): HEAD response as a plain text
|
(str): HEAD response as a plain text
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
||||||
result = cli.object.head(
|
result = cli.object.head(
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
wallet=wallet,
|
|
||||||
cid=cid,
|
cid=cid,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
bearer=bearer,
|
bearer=bearer,
|
||||||
|
@ -646,32 +616,32 @@ def head_object(
|
||||||
fst_line_idx = result.stdout.find("\n")
|
fst_line_idx = result.stdout.find("\n")
|
||||||
decoded = json.loads(result.stdout[fst_line_idx:])
|
decoded = json.loads(result.stdout[fst_line_idx:])
|
||||||
|
|
||||||
|
# if response
|
||||||
|
if "chunks" in decoded.keys():
|
||||||
|
logger.info("decoding ec chunks")
|
||||||
|
return decoded["chunks"]
|
||||||
|
|
||||||
# If response is Complex Object header, it has `splitId` key
|
# If response is Complex Object header, it has `splitId` key
|
||||||
if "splitId" in decoded.keys():
|
if "splitId" in decoded.keys():
|
||||||
logger.info("decoding split header")
|
|
||||||
return json_utils.decode_split_header(decoded)
|
return json_utils.decode_split_header(decoded)
|
||||||
|
|
||||||
# If response is Last or Linking Object header,
|
# If response is Last or Linking Object header,
|
||||||
# it has `header` dictionary and non-null `split` dictionary
|
# it has `header` dictionary and non-null `split` dictionary
|
||||||
if "split" in decoded["header"].keys():
|
if "split" in decoded["header"].keys():
|
||||||
if decoded["header"]["split"]:
|
if decoded["header"]["split"]:
|
||||||
logger.info("decoding linking object")
|
|
||||||
return json_utils.decode_linking_object(decoded)
|
return json_utils.decode_linking_object(decoded)
|
||||||
|
|
||||||
if decoded["header"]["objectType"] == "STORAGE_GROUP":
|
if decoded["header"]["objectType"] == "STORAGE_GROUP":
|
||||||
logger.info("decoding storage group")
|
|
||||||
return json_utils.decode_storage_group(decoded)
|
return json_utils.decode_storage_group(decoded)
|
||||||
|
|
||||||
if decoded["header"]["objectType"] == "TOMBSTONE":
|
if decoded["header"]["objectType"] == "TOMBSTONE":
|
||||||
logger.info("decoding tombstone")
|
|
||||||
return json_utils.decode_tombstone(decoded)
|
return json_utils.decode_tombstone(decoded)
|
||||||
|
|
||||||
logger.info("decoding simple header")
|
|
||||||
return json_utils.decode_simple_header(decoded)
|
return json_utils.decode_simple_header(decoded)
|
||||||
|
|
||||||
|
|
||||||
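The new branch makes head_object check for erasure-coded chunk responses before any of the older header shapes, so branch order now matters. A toy classifier over invented payloads shows the precedence:

import json


def classify(decoded: dict) -> str:
    # Same precedence as the updated head_object: EC chunks first,
    # then the complex-object split header, then everything else.
    if "chunks" in decoded:
        return "ec chunks"
    if "splitId" in decoded:
        return "split header"
    return "regular header"


print(classify(json.loads('{"chunks": []}')))      # ec chunks
print(classify(json.loads('{"splitId": "abc"}')))  # split header
print(classify(json.loads('{"header": {}}')))      # regular header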
@reporter.step_deco("Run neo-go dump-keys")
|
@reporter.step("Run neo-go dump-keys")
|
||||||
def neo_go_dump_keys(shell: Shell, wallet: str) -> dict:
|
def neo_go_dump_keys(shell: Shell, wallet: WalletInfo) -> dict:
|
||||||
"""
|
"""
|
||||||
Run neo-go dump keys command
|
Run neo-go dump keys command
|
||||||
|
|
||||||
|
@ -695,7 +665,7 @@ def neo_go_dump_keys(shell: Shell, wallet: str) -> dict:
|
||||||
return {address_id: wallet_key}
|
return {address_id: wallet_key}
|
||||||
|
|
||||||
|
|
||||||
@reporter.step_deco("Run neo-go query height")
|
@reporter.step("Run neo-go query height")
|
||||||
def neo_go_query_height(shell: Shell, endpoint: str) -> dict:
|
def neo_go_query_height(shell: Shell, endpoint: str) -> dict:
|
||||||
"""
|
"""
|
||||||
Run neo-go query height command
|
Run neo-go query height command
|
||||||
|
@ -720,8 +690,69 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict:
|
||||||
latest_block = first_line.split(":")
|
latest_block = first_line.split(":")
|
||||||
# taking second line from command's output contain wallet key
|
# taking second line from command's output contain wallet key
|
||||||
second_line = output.split("\n")[1]
|
second_line = output.split("\n")[1]
|
||||||
validated_state = second_line.split(":")
|
if second_line != "":
|
||||||
return {
|
validated_state = second_line.split(":")
|
||||||
latest_block[0].replace(":", ""): int(latest_block[1]),
|
return {
|
||||||
validated_state[0].replace(":", ""): int(validated_state[1]),
|
latest_block[0].replace(":", ""): int(latest_block[1]),
|
||||||
|
validated_state[0].replace(":", ""): int(validated_state[1]),
|
||||||
|
}
|
||||||
|
return {latest_block[0].replace(":", ""): int(latest_block[1])}
|
||||||
|
|
||||||
|
|
||||||
|
@wait_for_success()
|
||||||
|
@reporter.step("Search object nodes")
|
||||||
|
def get_object_nodes(
|
||||||
|
cluster: Cluster,
|
||||||
|
cid: str,
|
||||||
|
oid: str,
|
||||||
|
alive_node: ClusterNode,
|
||||||
|
bearer: str = "",
|
||||||
|
xhdr: Optional[dict] = None,
|
||||||
|
is_direct: bool = False,
|
||||||
|
verify_presence_all: bool = False,
|
||||||
|
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||||
|
) -> list[ClusterNode]:
|
||||||
|
shell = alive_node.host.get_shell()
|
||||||
|
endpoint = alive_node.storage_node.get_rpc_endpoint()
|
||||||
|
wallet = alive_node.storage_node.get_remote_wallet_path()
|
||||||
|
wallet_config = alive_node.storage_node.get_remote_wallet_config_path()
|
||||||
|
|
||||||
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config)
|
||||||
|
|
||||||
|
response = cli.object.nodes(
|
||||||
|
rpc_endpoint=endpoint,
|
||||||
|
cid=cid,
|
||||||
|
oid=oid,
|
||||||
|
bearer=bearer,
|
||||||
|
ttl=1 if is_direct else None,
|
||||||
|
json=True,
|
||||||
|
xhdr=xhdr,
|
||||||
|
timeout=timeout,
|
||||||
|
verify_presence_all=verify_presence_all,
|
||||||
|
)
|
||||||
|
|
||||||
|
response_json = json.loads(response.stdout)
|
||||||
|
# Currently, the command will show expected and confirmed nodes.
|
||||||
|
# And we (currently) count only nodes which are both expected and confirmed
|
||||||
|
object_nodes_id = {
|
||||||
|
required_node
|
||||||
|
for data_object in response_json["data_objects"]
|
||||||
|
for required_node in data_object["required_nodes"]
|
||||||
|
if required_node in data_object["confirmed_nodes"]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
netmap_nodes_list = parse_netmap_output(
|
||||||
|
cli.netmap.snapshot(
|
||||||
|
rpc_endpoint=endpoint,
|
||||||
|
wallet=wallet,
|
||||||
|
).stdout
|
||||||
|
)
|
||||||
|
netmap_nodes = [
|
||||||
|
netmap_node for object_node in object_nodes_id for netmap_node in netmap_nodes_list if object_node == netmap_node.node_id
|
||||||
|
]
|
||||||
|
|
||||||
|
object_nodes = [
|
||||||
|
cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes if netmap_node.node == cluster_node.host_ip
|
||||||
|
]
|
||||||
|
|
||||||
|
return object_nodes
|
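The height parser previously assumed `neo-go query height` always prints two lines; the guard now handles outputs where the validated-state line is absent. The parsing logic on its own, runnable against made-up output strings:

def parse_height(output: str) -> dict:
    # Mirrors the guarded parse: the second line (validated state) is optional.
    lines = output.split("\n")
    latest_block = lines[0].split(":")
    second_line = lines[1] if len(lines) > 1 else ""
    if second_line != "":
        validated_state = second_line.split(":")
        return {
            latest_block[0]: int(latest_block[1]),
            validated_state[0]: int(validated_state[1]),
        }
    return {latest_block[0]: int(latest_block[1])}


print(parse_height("Latest block: 1234\nValidated state: 1230"))
# {'Latest block': 1234, 'Validated state': 1230}
print(parse_height("Latest block: 1234"))
# {'Latest block': 1234}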
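get_object_nodes trusts a node only when it appears in both the required_nodes and confirmed_nodes lists of the same data object. The set comprehension in isolation, over an invented response shape that matches the keys used above:

response_json = {
    "data_objects": [
        {"required_nodes": ["n1", "n2"], "confirmed_nodes": ["n2"]},
        {"required_nodes": ["n3"], "confirmed_nodes": ["n3"]},
    ]
}

# Keep a node ID only if the same data object also confirms it.
object_nodes_id = {
    required_node
    for data_object in response_json["data_objects"]
    for required_node in data_object["required_nodes"]
    if required_node in data_object["confirmed_nodes"]
}
print(sorted(object_nodes_id))  # ['n2', 'n3']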
src/frostfs_testlib/steps/cli/tree.py (new file, 35 lines)
@@ -0,0 +1,35 @@
+import logging
+from typing import Optional
+
+from frostfs_testlib import reporter
+from frostfs_testlib.cli import FrostfsCli
+from frostfs_testlib.plugins import load_plugin
+from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
+from frostfs_testlib.shell import Shell
+from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+
+logger = logging.getLogger("NeoLogger")
+
+
+
+@reporter.step("Get Tree List")
+def get_tree_list(
+    wallet: WalletInfo,
+    cid: str,
+    shell: Shell,
+    endpoint: str,
+    timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+) -> None:
+    """
+    A wrapper for `frostfs-cli tree list` call.
+    Args:
+        wallet (WalletInfo): path to a wallet on whose behalf we delete the container
+        cid (str): ID of the container to delete
+        shell: executor for cli command
+        endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
+        timeout: Timeout for the operation.
+    This function doesn't return anything.
+    """
+
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
+    cli.tree.list(cid=cid, rpc_endpoint=endpoint, timeout=timeout)

@@ -12,15 +12,14 @@
 import logging
 from typing import Optional, Tuple

-from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib import reporter
 from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
-from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.cli.object import head_object
 from frostfs_testlib.storage.cluster import Cluster, StorageNode
 from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
+from frostfs_testlib.storage.dataclasses.wallet import WalletInfo

-reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")


@@ -45,7 +44,7 @@ def get_storage_object_chunks(

     with reporter.step(f"Get complex object chunks (f{storage_object.oid})"):
         split_object_id = get_link_object(
-            storage_object.wallet_file_path,
+            storage_object.wallet,
             storage_object.cid,
             storage_object.oid,
             shell,
@@ -54,7 +53,7 @@ def get_storage_object_chunks(
             timeout=timeout,
         )
         head = head_object(
-            storage_object.wallet_file_path,
+            storage_object.wallet,
             storage_object.cid,
             split_object_id,
             shell,
@@ -97,7 +96,7 @@ def get_complex_object_split_ranges(
     chunks_ids = get_storage_object_chunks(storage_object, shell, cluster)
     for chunk_id in chunks_ids:
         head = head_object(
-            storage_object.wallet_file_path,
+            storage_object.wallet,
             storage_object.cid,
             chunk_id,
             shell,
@@ -113,15 +112,14 @@ def get_complex_object_split_ranges(
     return ranges


-@reporter.step_deco("Get Link Object")
+@reporter.step("Get Link Object")
 def get_link_object(
-    wallet: str,
+    wallet: WalletInfo,
     cid: str,
     oid: str,
     shell: Shell,
     nodes: list[StorageNode],
     bearer: str = "",
-    wallet_config: str = DEFAULT_WALLET_CONFIG,
     is_direct: bool = True,
     timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
 ):
@@ -155,7 +153,6 @@ def get_link_object(
             is_raw=True,
             is_direct=is_direct,
             bearer=bearer,
-            wallet_config=wallet_config,
             timeout=timeout,
         )
         if resp["link"]:
@@ -166,9 +163,9 @@ def get_link_object(
         return None


-@reporter.step_deco("Get Last Object")
+@reporter.step("Get Last Object")
 def get_last_object(
-    wallet: str,
+    wallet: WalletInfo,
     cid: str,
     oid: str,
     shell: Shell,

@@ -2,15 +2,9 @@ import logging
 from time import sleep
 from typing import Optional

+from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo
-from frostfs_testlib.reporter import get_reporter
-from frostfs_testlib.resources.cli import (
-    CLI_DEFAULT_TIMEOUT,
-    FROSTFS_ADM_CONFIG_PATH,
-    FROSTFS_ADM_EXEC,
-    FROSTFS_CLI_EXEC,
-    NEOGO_EXECUTABLE,
-)
+from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE
 from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.payment_neogo import get_contract_hash
@@ -19,11 +13,10 @@ from frostfs_testlib.storage.dataclasses.frostfs_services import InnerRing, Morp
 from frostfs_testlib.testing.test_control import wait_for_success
 from frostfs_testlib.utils import datetime_utils, wallet_utils

-reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")


-@reporter.step_deco("Get epochs from nodes")
+@reporter.step("Get epochs from nodes")
 def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]:
     """
     Get current epochs on each node.
@@ -41,10 +34,8 @@ def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]:
     return epochs_by_node


-@reporter.step_deco("Ensure fresh epoch")
-def ensure_fresh_epoch(
-    shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None
-) -> int:
+@reporter.step("Ensure fresh epoch")
+def ensure_fresh_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None) -> int:
     # ensure new fresh epoch to avoid epoch switch during test session
     alive_node = alive_node if alive_node else cluster.services(StorageNode)[0]
     current_epoch = get_epoch(shell, cluster, alive_node)
@@ -54,19 +45,17 @@ def ensure_fresh_epoch(
     return epoch


-@reporter.step_deco("Wait for epochs align in whole cluster")
-@wait_for_success(60, 5)
-def wait_for_epochs_align(shell: Shell, cluster: Cluster) -> None:
-    epochs = []
-    for node in cluster.services(StorageNode):
-        epochs.append(get_epoch(shell, cluster, node))
-    unique_epochs = list(set(epochs))
-    assert (
-        len(unique_epochs) == 1
-    ), f"unaligned epochs found, {epochs}, count of unique epochs {len(unique_epochs)}"
+@reporter.step("Wait up to {timeout} seconds for nodes on cluster to align epochs")
+def wait_for_epochs_align(shell: Shell, cluster: Cluster, timeout=60):
+    @wait_for_success(timeout, 5, None, True)
+    def check_epochs():
+        epochs_by_node = get_epochs_from_nodes(shell, cluster)
+        assert len(set(epochs_by_node.values())) == 1, f"unaligned epochs found: {epochs_by_node}"
+
+    check_epochs()


-@reporter.step_deco("Get Epoch")
+@reporter.step("Get Epoch")
 def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None):
     alive_node = alive_node if alive_node else cluster.services(StorageNode)[0]
     endpoint = alive_node.get_rpc_endpoint()

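The rewrite swaps the single-shot assertion for a retrying inner check: `wait_for_success(timeout, 5, None, True)` keeps calling check_epochs every 5 seconds until the epochs match or the timeout runs out (the real decorator in frostfs_testlib.testing.test_control takes extra positional options whose meaning isn't shown in this diff). A minimal stand-in for that retry pattern:

import time


def wait_for_success(timeout: float, interval: float):
    # Retry the wrapped check until it stops raising or the deadline passes.
    def decorator(func):
        def wrapper(*args, **kwargs):
            deadline = time.monotonic() + timeout
            while True:
                try:
                    return func(*args, **kwargs)
                except AssertionError:
                    if time.monotonic() >= deadline:
                        raise
                    time.sleep(interval)
        return wrapper
    return decorator


@wait_for_success(timeout=1, interval=0.2)
def check_epochs():
    epochs_by_node = {"node1": 5, "node2": 5}  # stand-in for a real cluster query
    assert len(set(epochs_by_node.values())) == 1, f"unaligned epochs found: {epochs_by_node}"


check_epochs()  # passes immediately here; would retry on a real mismatch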
@@ -79,8 +68,8 @@ def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode]
     return int(epoch.stdout)


-@reporter.step_deco("Tick Epoch")
-def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None):
+@reporter.step("Tick Epoch")
+def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None, delta: Optional[int] = None):
     """
     Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv)
     Args:
@@ -92,19 +81,24 @@ def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode]
     alive_node = alive_node if alive_node else cluster.services(StorageNode)[0]
     remote_shell = alive_node.host.get_shell()

-    if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH:
+    if "force_transactions" not in alive_node.host.config.attributes:
         # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests)
         frostfs_adm = FrostfsAdm(
             shell=remote_shell,
             frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
             config_file=FROSTFS_ADM_CONFIG_PATH,
         )
-        frostfs_adm.morph.force_new_epoch()
+        frostfs_adm.morph.force_new_epoch(delta=delta)
         return

     # Otherwise we tick epoch using transaction
     cur_epoch = get_epoch(shell, cluster)

+    if delta:
+        next_epoch = cur_epoch + delta
+    else:
+        next_epoch = cur_epoch + 1
+
     # Use first node by default
     ir_node = cluster.services(InnerRing)[0]
     # In case if no local_wallet_path is provided, we use wallet_path
@@ -121,7 +115,7 @@ def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode]
         wallet_password=ir_wallet_pass,
         scripthash=get_contract_hash(morph_chain, "netmap.frostfs", shell=shell),
         method="newEpoch",
-        arguments=f"int:{cur_epoch + 1}",
+        arguments=f"int:{next_epoch}",
         multisig_hash=f"{ir_address}:Global",
         address=ir_address,
         rpc_endpoint=morph_endpoint,

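In the transaction fallback the new delta parameter simply shifts the target epoch: jump delta epochs ahead when given, otherwise advance by one as before. The arithmetic on its own:

from typing import Optional


def next_epoch_number(cur_epoch: int, delta: Optional[int] = None) -> int:
    # delta=None (or 0) preserves the old single-step behaviour.
    return cur_epoch + delta if delta else cur_epoch + 1


assert next_epoch_number(10) == 11
assert next_epoch_number(10, delta=5) == 15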
@@ -10,93 +10,105 @@ from urllib.parse import quote_plus

 import requests

-from frostfs_testlib.reporter import get_reporter
-from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE
-from frostfs_testlib.s3.aws_cli_client import LONG_TIMEOUT
+from frostfs_testlib import reporter
+from frostfs_testlib.cli import GenericCli
+from frostfs_testlib.resources.common import ASSETS_DIR, SIMPLE_OBJECT_SIZE
+from frostfs_testlib.s3.aws_cli_client import command_options
 from frostfs_testlib.shell import Shell
+from frostfs_testlib.shell.local_shell import LocalShell
 from frostfs_testlib.steps.cli.object import get_object
 from frostfs_testlib.steps.storage_policy import get_nodes_without_object
-from frostfs_testlib.storage.cluster import StorageNode
-from frostfs_testlib.utils.cli_utils import _cmd_run
-from frostfs_testlib.utils.file_utils import get_file_hash
+from frostfs_testlib.storage.cluster import ClusterNode, StorageNode
+from frostfs_testlib.testing.test_control import retry
+from frostfs_testlib.utils.file_utils import TestFile, get_file_hash

-reporter = get_reporter()

 logger = logging.getLogger("NeoLogger")

-ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/")
+local_shell = LocalShell()


-@reporter.step_deco("Get via HTTP Gate")
-def get_via_http_gate(cid: str, oid: str, endpoint: str, request_path: Optional[str] = None):
+@reporter.step("Get via HTTP Gate")
+def get_via_http_gate(
+    cid: str,
+    oid: str,
+    node: ClusterNode,
+    request_path: Optional[str] = None,
+    timeout: Optional[int] = 300,
+):
     """
     This function gets given object from HTTP gate
     cid: container id to get object from
     oid: object ID
-    endpoint: http gate endpoint
+    node: node to make request
     request_path: (optional) http request, if ommited - use default [{endpoint}/get/{cid}/{oid}]
     """

     # if `request_path` parameter omitted, use default
     if request_path is None:
-        request = f"{endpoint}/get/{cid}/{oid}"
+        request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}"
     else:
-        request = f"{endpoint}{request_path}"
+        request = f"{node.http_gate.get_endpoint()}{request_path}"

-    resp = requests.get(request, stream=True)
+    resp = requests.get(request, stream=True, timeout=timeout, verify=False)

     if not resp.ok:
         raise Exception(
             f"""Failed to get object via HTTP gate:
                 request: {resp.request.path_url},
                 response: {resp.text},
+                headers: {resp.headers},
                 status code: {resp.status_code} {resp.reason}"""
         )

     logger.info(f"Request: {request}")
     _attach_allure_step(request, resp.status_code)

-    file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}")
-    with open(file_path, "wb") as file:
+    test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}"))
+    with open(test_file, "wb") as file:
         shutil.copyfileobj(resp.raw, file)
-    return file_path
+    return test_file

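The download helpers in this hunk all gain the same hardening: an explicit request timeout, `verify=False` for the self-signed certificates a dev-env gate typically serves, and response headers in the error message. The core pattern reduced to a standalone function (the endpoint and paths are invented; `requests` is the only third-party dependency):

import shutil

import requests


def download_via_gate(endpoint: str, cid: str, oid: str, out_path: str, timeout: int = 300) -> str:
    # Stream the body straight to disk instead of buffering it in memory.
    resp = requests.get(f"{endpoint}/get/{cid}/{oid}", stream=True, timeout=timeout, verify=False)
    if not resp.ok:
        raise Exception(f"HTTP gate error {resp.status_code} {resp.reason}: headers={resp.headers}")
    with open(out_path, "wb") as file:
        shutil.copyfileobj(resp.raw, file)
    return out_path


# download_via_gate("http://hs.frostfs.devenv:8080", "SOME_CID", "SOME_OID", "/tmp/obj.bin")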
@reporter.step_deco("Get via Zip HTTP Gate")
|
@reporter.step("Get via Zip HTTP Gate")
|
||||||
def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str):
|
def get_via_zip_http_gate(cid: str, prefix: str, node: ClusterNode, timeout: Optional[int] = 300):
|
||||||
"""
|
"""
|
||||||
This function gets given object from HTTP gate
|
This function gets given object from HTTP gate
|
||||||
cid: container id to get object from
|
cid: container id to get object from
|
||||||
prefix: common prefix
|
prefix: common prefix
|
||||||
endpoint: http gate endpoint
|
node: node to make request
|
||||||
"""
|
"""
|
||||||
request = f"{endpoint}/zip/{cid}/{prefix}"
|
request = f"{node.http_gate.get_endpoint()}/zip/{cid}/{prefix}"
|
||||||
resp = requests.get(request, stream=True)
|
resp = requests.get(request, stream=True, timeout=timeout, verify=False)
|
||||||
|
|
||||||
if not resp.ok:
|
if not resp.ok:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
f"""Failed to get object via HTTP gate:
|
f"""Failed to get object via HTTP gate:
|
||||||
request: {resp.request.path_url},
|
request: {resp.request.path_url},
|
||||||
response: {resp.text},
|
response: {resp.text},
|
||||||
|
headers: {resp.headers},
|
||||||
status code: {resp.status_code} {resp.reason}"""
|
status code: {resp.status_code} {resp.reason}"""
|
||||||
)
|
)
|
||||||
|
|
||||||
logger.info(f"Request: {request}")
|
logger.info(f"Request: {request}")
|
||||||
_attach_allure_step(request, resp.status_code)
|
_attach_allure_step(request, resp.status_code)
|
||||||
|
|
||||||
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip")
|
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip"))
|
||||||
with open(file_path, "wb") as file:
|
with open(test_file, "wb") as file:
|
||||||
shutil.copyfileobj(resp.raw, file)
|
shutil.copyfileobj(resp.raw, file)
|
||||||
|
|
||||||
with zipfile.ZipFile(file_path, "r") as zip_ref:
|
with zipfile.ZipFile(test_file, "r") as zip_ref:
|
||||||
zip_ref.extractall(ASSETS_DIR)
|
zip_ref.extractall(ASSETS_DIR)
|
||||||
|
|
||||||
return os.path.join(os.getcwd(), ASSETS_DIR, prefix)
|
return os.path.join(os.getcwd(), ASSETS_DIR, prefix)
|
||||||
|
|
||||||
|
|
-@reporter.step_deco("Get via HTTP Gate by attribute")
-def get_via_http_gate_by_attribute(
-    cid: str, attribute: dict, endpoint: str, request_path: Optional[str] = None
-):
+@reporter.step("Get via HTTP Gate by attribute")
+def get_via_http_gate_by_attribute(
+    cid: str,
+    attribute: dict,
+    node: ClusterNode,
+    request_path: Optional[str] = None,
+    timeout: Optional[int] = 300,
+):
     """
     This function gets given object from HTTP gate
@@ -109,31 +121,32 @@ def get_via_http_gate_by_attribute(
     attr_value = quote_plus(str(attribute.get(attr_name)))
     # if `request_path` parameter ommited, use default
     if request_path is None:
-        request = f"{endpoint}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
+        request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
     else:
-        request = f"{endpoint}{request_path}"
+        request = f"{node.http_gate.get_endpoint()}{request_path}"
 
-    resp = requests.get(request, stream=True)
+    resp = requests.get(request, stream=True, timeout=timeout, verify=False)
 
     if not resp.ok:
         raise Exception(
             f"""Failed to get object via HTTP gate:
                 request: {resp.request.path_url},
                 response: {resp.text},
+                headers: {resp.headers},
                 status code: {resp.status_code} {resp.reason}"""
         )
 
     logger.info(f"Request: {request}")
     _attach_allure_step(request, resp.status_code)
 
-    file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}")
-    with open(file_path, "wb") as file:
+    test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}"))
+    with open(test_file, "wb") as file:
         shutil.copyfileobj(resp.raw, file)
-    return file_path
+    return test_file
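Attribute lookups URL-encode both the attribute name and its value with quote_plus, so keys and values containing spaces or slashes survive as path segments. Sketch (attribute values are illustrative):

    # Sketch only: fetch an object by a user attribute instead of its OID.
    test_file = get_via_http_gate_by_attribute(
        cid=cid,
        attribute={"FileName": "my report.txt"},  # encoded as FileName/my+report.txt
        node=node,
    )
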
-@reporter.step_deco("Upload via HTTP Gate")
-def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[dict] = None) -> str:
+@reporter.step("Upload via HTTP Gate")
+def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300) -> str:
     """
     This function upload given object through HTTP gate
     cid: CID to get object from
@@ -144,7 +157,7 @@ def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[d
     request = f"{endpoint}/upload/{cid}"
     files = {"upload_file": open(path, "rb")}
     body = {"filename": path}
-    resp = requests.post(request, files=files, data=body, headers=headers)
+    resp = requests.post(request, files=files, data=body, headers=headers, timeout=timeout, verify=False)
 
     if not resp.ok:
         raise Exception(
@@ -162,7 +175,7 @@ def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[d
     return resp.json().get("object_id")
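Upload still takes a raw endpoint string here (only timeout and verify=False were added), and object attributes travel as X-Attribute-* headers built by attr_into_header further down in this file. Sketch (paths are illustrative):

    # Sketch only: attach user attributes to an upload via headers.
    headers = attr_into_header({"FileName": "report.txt", "Environment": "test"})
    oid = upload_via_http_gate(cid=cid, path="/tmp/report.txt", endpoint=endpoint, headers=headers)
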
-@reporter.step_deco("Check is the passed object large")
+@reporter.step("Check is the passed object large")
 def is_object_large(filepath: str) -> bool:
     """
     This function check passed file size and return True if file_size > SIMPLE_OBJECT_SIZE
@@ -176,7 +189,7 @@ def is_object_large(filepath: str) -> bool:
     return False
-@reporter.step_deco("Upload via HTTP Gate using Curl")
+@reporter.step("Upload via HTTP Gate using Curl")
 def upload_via_http_gate_curl(
     cid: str,
     filepath: str,
@@ -201,16 +214,16 @@ def upload_via_http_gate_curl(
     large_object = is_object_large(filepath)
     if large_object:
         # pre-clean
-        _cmd_run("rm pipe -f")
+        local_shell.exec("rm pipe -f")
         files = f"file=@pipe;filename={os.path.basename(filepath)}"
-        cmd = f"mkfifo pipe;cat {filepath} > pipe & curl --no-buffer -F '{files}' {attributes} {request}"
-        output = _cmd_run(cmd, LONG_TIMEOUT)
+        cmd = f"mkfifo pipe;cat {filepath} > pipe & curl -k --no-buffer -F '{files}' {attributes} {request}"
+        output = local_shell.exec(cmd, command_options)
         # clean up pipe
-        _cmd_run("rm pipe")
+        local_shell.exec("rm pipe")
     else:
         files = f"file=@{filepath};filename={os.path.basename(filepath)}"
-        cmd = f"curl -F '{files}' {attributes} {request}"
-        output = _cmd_run(cmd)
+        cmd = f"curl -k -F '{files}' {attributes} {request}"
+        output = local_shell.exec(cmd)
 
     if error_pattern:
         match = error_pattern.casefold() in str(output).casefold()
@@ -223,21 +236,22 @@ def upload_via_http_gate_curl(
     return oid_re.group(1)
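Two behavior changes land here: the local _cmd_run wrapper is replaced by the testlib shell (local_shell.exec), and -k is added so curl skips TLS verification, mirroring verify=False in the requests-based helpers. The FIFO branch exists so large objects stream through curl without a second copy on disk. A minimal standalone sketch of the same pipe trick, using plain subprocess rather than the testlib shell (Unix-only):

    import os
    import subprocess

    def stream_upload(filepath: str, url: str) -> str:
        # mkfifo gives curl a pipe to read while cat fills it: no second on-disk copy
        subprocess.run(["rm", "-f", "pipe"], check=True)
        os.mkfifo("pipe")
        try:
            cat = subprocess.Popen(f"cat {filepath} > pipe", shell=True)
            out = subprocess.run(
                ["curl", "-k", "--no-buffer", "-F",
                 f"file=@pipe;filename={os.path.basename(filepath)}", url],
                capture_output=True, text=True, check=True,
            )
            cat.wait()
            return out.stdout
        finally:
            os.remove("pipe")
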
-@reporter.step_deco("Get via HTTP Gate using Curl")
-def get_via_http_curl(cid: str, oid: str, endpoint: str) -> str:
+@retry(max_attempts=3, sleep_interval=1)
+@reporter.step("Get via HTTP Gate using Curl")
+def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> TestFile:
     """
     This function gets given object from HTTP gate using curl utility.
     cid: CID to get object from
     oid: object OID
-    endpoint: http gate endpoint
+    node: node for request
     """
-    request = f"{endpoint}/get/{cid}/{oid}"
-    file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")
+    request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}"
+    test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}"))
 
-    cmd = f"curl {request} > {file_path}"
-    _cmd_run(cmd)
+    curl = GenericCli("curl", node.host)
+    curl(f"-k ", f"{request} > {test_file}", shell=local_shell)
 
-    return file_path
+    return test_file
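The curl-based getter now carries @retry(max_attempts=3, sleep_interval=1) on top of the reporter step, so a transient gate hiccup no longer fails the test outright. A minimal sketch of what such a decorator does (illustrative re-implementation only, not the testlib's actual code):

    import time
    from functools import wraps

    def retry(max_attempts: int, sleep_interval: float):
        # Illustrative: retry any exception, re-raise the last one.
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                last_error = None
                for _ in range(max_attempts):
                    try:
                        return func(*args, **kwargs)
                    except Exception as err:
                        last_error = err
                        time.sleep(sleep_interval)
                raise last_error
            return wrapper
        return decorator
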
 def _attach_allure_step(request: str, status_code: int, req_type="GET"):
@@ -246,26 +260,31 @@ def _attach_allure_step(request: str, status_code: int, req_type="GET"):
     reporter.attach(command_attachment, f"{req_type} Request")
 
 
-@reporter.step_deco("Try to get object and expect error")
+@reporter.step("Try to get object and expect error")
 def try_to_get_object_and_expect_error(
-    cid: str, oid: str, error_pattern: str, endpoint: str
+    cid: str,
+    oid: str,
+    node: ClusterNode,
+    error_pattern: str,
 ) -> None:
     try:
-        get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint)
+        get_via_http_gate(cid=cid, oid=oid, node=node)
         raise AssertionError(f"Expected error on getting object with cid: {cid}")
     except Exception as err:
         match = error_pattern.casefold() in str(err).casefold()
         assert match, f"Expected {err} to match {error_pattern}"
-@reporter.step_deco("Verify object can be get using HTTP header attribute")
+@reporter.step("Verify object can be get using HTTP header attribute")
 def get_object_by_attr_and_verify_hashes(
-    oid: str, file_name: str, cid: str, attrs: dict, endpoint: str
+    oid: str,
+    file_name: str,
+    cid: str,
+    attrs: dict,
+    node: ClusterNode,
 ) -> None:
-    got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint)
-    got_file_path_http_attr = get_via_http_gate_by_attribute(
-        cid=cid, attribute=attrs, endpoint=endpoint
-    )
+    got_file_path_http = get_via_http_gate(cid=cid, oid=oid, node=node)
+    got_file_path_http_attr = get_via_http_gate_by_attribute(cid=cid, attribute=attrs, node=node)
     assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr)
@@ -276,7 +295,7 @@ def verify_object_hash(
     cid: str,
     shell: Shell,
     nodes: list[StorageNode],
-    endpoint: str,
+    request_node: ClusterNode,
     object_getter=None,
 ) -> None:
 
@@ -302,7 +321,7 @@ def verify_object_hash(
         shell=shell,
         endpoint=random_node.get_rpc_endpoint(),
     )
-    got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint)
+    got_file_path_http = object_getter(cid=cid, oid=oid, node=request_node)
 
     assert_hashes_are_equal(file_name, got_file_path, got_file_path_http)
@@ -311,18 +330,14 @@ def assert_hashes_are_equal(orig_file_name: str, got_file_1: str, got_file_2: st
     msg = "Expected hashes are equal for files {f1} and {f2}"
     got_file_hash_http = get_file_hash(got_file_1)
     assert get_file_hash(got_file_2) == got_file_hash_http, msg.format(f1=got_file_2, f2=got_file_1)
-    assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format(
-        f1=orig_file_name, f2=got_file_1
-    )
+    assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format(f1=orig_file_name, f2=got_file_1)
 def attr_into_header(attrs: dict) -> dict:
     return {f"X-Attribute-{_key}": _value for _key, _value in attrs.items()}
 
 
-@reporter.step_deco(
-    "Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: Value'"
-)
+@reporter.step("Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: Value'")
 def attr_into_str_header_curl(attrs: dict) -> list:
     headers = []
     for k, v in attrs.items():
@@ -331,23 +346,29 @@ def attr_into_str_header_curl(attrs: dict) -> list:
     return headers
-@reporter.step_deco(
-    "Try to get object via http (pass http_request and optional attributes) and expect error"
-)
+@reporter.step("Try to get object via http (pass http_request and optional attributes) and expect error")
 def try_to_get_object_via_passed_request_and_expect_error(
     cid: str,
     oid: str,
+    node: ClusterNode,
     error_pattern: str,
-    endpoint: str,
     http_request_path: str,
     attrs: Optional[dict] = None,
 ) -> None:
     try:
         if attrs is None:
-            get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, request_path=http_request_path)
+            get_via_http_gate(
+                cid=cid,
+                oid=oid,
+                node=node,
+                request_path=http_request_path,
+            )
         else:
             get_via_http_gate_by_attribute(
-                cid=cid, attribute=attrs, endpoint=endpoint, request_path=http_request_path
+                cid=cid,
+                attribute=attrs,
+                node=node,
+                request_path=http_request_path,
             )
         raise AssertionError(f"Expected error on getting object with cid: {cid}")
     except Exception as err:
src/frostfs_testlib/steps/metrics.py (new file, 45 lines)
@@ -0,0 +1,45 @@
+import re
+
+from frostfs_testlib import reporter
+from frostfs_testlib.storage.cluster import ClusterNode
+from frostfs_testlib.testing.test_control import wait_for_success
+
+
+@reporter.step("Check metrics result")
+@wait_for_success(interval=10)
+def check_metrics_counter(
+    cluster_nodes: list[ClusterNode],
+    operator: str = "==",
+    counter_exp: int = 0,
+    parse_from_command: bool = False,
+    **metrics_greps: str,
+):
+    counter_act = 0
+    for cluster_node in cluster_nodes:
+        counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps)
+    assert eval(
+        f"{counter_act} {operator} {counter_exp}"
+    ), f"Expected: {counter_exp} {operator} Actual: {counter_act} in nodes: {cluster_nodes}"
+
+
+@reporter.step("Get metrics value from node: {node}")
+def get_metrics_value(node: ClusterNode, parse_from_command: bool = False, **metrics_greps: str):
+    try:
+        command_result = node.metrics.storage.get_metrics_search_by_greps(**metrics_greps)
+        if parse_from_command:
+            metrics_counter = calc_metrics_count_from_stdout(command_result.stdout, **metrics_greps)
+        else:
+            metrics_counter = calc_metrics_count_from_stdout(command_result.stdout)
+    except RuntimeError as e:
+        metrics_counter = 0
+
+    return metrics_counter
+
+
+@reporter.step("Parse metrics count and calc sum of result")
+def calc_metrics_count_from_stdout(metric_result_stdout: str, command: str = None):
+    if command:
+        result = re.findall(rf"{command}\s*([\d.e+-]+)", metric_result_stdout)
+    else:
+        result = re.findall(r"}\s*([\d.e+-]+)", metric_result_stdout)
+    return sum(map(lambda x: int(float(x)), result))
src/frostfs_testlib/steps/network.py (new file, 19 lines)
@@ -0,0 +1,19 @@
+from frostfs_testlib.shell import CommandOptions
+from frostfs_testlib.storage.cluster import ClusterNode
+
+
+class IpHelper:
+    @staticmethod
+    def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[str]) -> None:
+        shell = node.host.get_shell()
+        for ip in block_ip:
+            shell.exec(f"ip route add blackhole {ip}")
+
+    @staticmethod
+    def restore_input_traffic_to_node(node: ClusterNode) -> None:
+        shell = node.host.get_shell()
+        unlock_ip = shell.exec("ip route list | grep blackhole", CommandOptions(check=False))
+        if unlock_ip.return_code != 0:
+            return
+        for ip in unlock_ip.stdout.strip().split("\n"):
+            shell.exec(f"ip route del blackhole {ip.split(' ')[1]}")
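IpHelper simulates network partitions with blackhole routes instead of firewall rules: ip route add blackhole <ip> silently drops traffic toward a peer, and the restore path scans ip route list for blackhole entries (the second whitespace-separated field is the address) and deletes them. Sketch of a failover step built on it (fixture names and the address attribute path are assumptions):

    # Sketch only: isolate one node from its peers, then heal the partition.
    peer_ips = [n.host.config.address for n in other_nodes]  # attribute path is an assumption
    IpHelper.drop_input_traffic_to_node(node_under_test, peer_ips)
    # ... assert the cluster degrades / re-replicates as expected ...
    IpHelper.restore_input_traffic_to_node(node_under_test)
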
src/frostfs_testlib/steps/node_management.py
@@ -6,21 +6,16 @@ from dataclasses import dataclass
 from time import sleep
 from typing import Optional
 
+from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
-from frostfs_testlib.reporter import get_reporter
-from frostfs_testlib.resources.cli import (
-    FROSTFS_ADM_CONFIG_PATH,
-    FROSTFS_ADM_EXEC,
-    FROSTFS_CLI_EXEC,
-)
+from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC
 from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
 from frostfs_testlib.shell import Shell
-from frostfs_testlib.steps.epoch import tick_epoch
+from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align
 from frostfs_testlib.storage.cluster import Cluster, StorageNode
-from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
+from frostfs_testlib.testing.test_control import wait_for_success
 from frostfs_testlib.utils import datetime_utils
 
-reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")
@@ -40,45 +35,7 @@ class HealthStatus:
         return HealthStatus(network, health)
 
 
-@reporter.step_deco("Stop random storage nodes")
-def stop_random_storage_nodes(number: int, nodes: list[StorageNode]) -> list[StorageNode]:
-    """
-    Shuts down the given number of randomly selected storage nodes.
-    Args:
-        number: the number of storage nodes to stop
-        nodes: the list of storage nodes to stop
-    Returns:
-        the list of nodes that were stopped
-    """
-    nodes_to_stop = random.sample(nodes, number)
-    for node in nodes_to_stop:
-        node.stop_service()
-    return nodes_to_stop
-
-
-@reporter.step_deco("Start storage node")
-def start_storage_nodes(nodes: list[StorageNode]) -> None:
-    """
-    The function starts specified storage nodes.
-    Args:
-        nodes: the list of nodes to start
-    """
-    for node in nodes:
-        node.start_service()
-
-
-@reporter.step_deco("Stop storage node")
-def stop_storage_nodes(nodes: list[StorageNode]) -> None:
-    """
-    The function starts specified storage nodes.
-    Args:
-        nodes: the list of nodes to start
-    """
-    for node in nodes:
-        node.stop_service()
-
-
-@reporter.step_deco("Get Locode from random storage node")
+@reporter.step("Get Locode from random storage node")
 def get_locode_from_random_node(cluster: Cluster) -> str:
     node = random.choice(cluster.services(StorageNode))
     locode = node.get_un_locode()
@@ -86,7 +43,7 @@ def get_locode_from_random_node(cluster: Cluster) -> str:
     return locode
 
 
-@reporter.step_deco("Healthcheck for storage node {node}")
+@reporter.step("Healthcheck for storage node {node}")
 def storage_node_healthcheck(node: StorageNode) -> HealthStatus:
     """
     The function returns storage node's health status.
@@ -95,12 +52,27 @@ def storage_node_healthcheck(node: StorageNode) -> HealthStatus:
     Returns:
         health status as HealthStatus object.
     """
-    command = "control healthcheck"
-    output = _run_control_command_with_retries(node, command)
-    return HealthStatus.from_stdout(output)
+    host = node.host
+    service_config = host.get_service_config(node.name)
+    wallet_path = service_config.attributes["wallet_path"]
+    wallet_password = service_config.attributes["wallet_password"]
+    control_endpoint = service_config.attributes["control_endpoint"]
+
+    shell = host.get_shell()
+    wallet_config_path = f"/tmp/{node.name}-config.yaml"
+    wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
+    shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
+
+    cli_config = host.get_cli_config("frostfs-cli")
+
+    cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
+    result = cli.control.healthcheck(control_endpoint)
+
+    return HealthStatus.from_stdout(result.stdout)
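The same preamble, read the wallet attributes from the service config, write a temporary wallet config, and build a FrostfsCli, is now repeated verbatim in every control-command helper below. A possible consolidation, sketched as a hypothetical helper that is not part of this changeset (it reuses only calls that appear in the diff):

    # Hypothetical helper; name and shape are illustrative, not from this diff.
    def _node_cli(node: StorageNode) -> tuple[FrostfsCli, str]:
        host = node.host
        service_config = host.get_service_config(node.name)
        wallet_path = service_config.attributes["wallet_path"]
        wallet_password = service_config.attributes["wallet_password"]
        control_endpoint = service_config.attributes["control_endpoint"]

        shell = host.get_shell()
        wallet_config_path = f"/tmp/{node.name}-config.yaml"
        wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
        shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")

        cli_config = host.get_cli_config("frostfs-cli")
        return FrostfsCli(shell, cli_config.exec_path, wallet_config_path), control_endpoint
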
-@reporter.step_deco("Set status for {node}")
+@reporter.step("Set status for {node}")
 def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> None:
     """
     The function sets particular status for given node.
@@ -109,11 +81,24 @@ def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) ->
         status: online or offline.
         retries (optional, int): number of retry attempts if it didn't work from the first time
     """
-    command = f"control set-status --status {status}"
-    _run_control_command_with_retries(node, command, retries)
+    host = node.host
+    service_config = host.get_service_config(node.name)
+    wallet_path = service_config.attributes["wallet_path"]
+    wallet_password = service_config.attributes["wallet_password"]
+    control_endpoint = service_config.attributes["control_endpoint"]
+
+    shell = host.get_shell()
+    wallet_config_path = f"/tmp/{node.name}-config.yaml"
+    wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
+    shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
+
+    cli_config = host.get_cli_config("frostfs-cli")
+
+    cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
+    cli.control.set_status(control_endpoint, status)
 
 
-@reporter.step_deco("Get netmap snapshot")
+@reporter.step("Get netmap snapshot")
 def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str:
     """
     The function returns string representation of netmap snapshot.
@@ -127,14 +112,11 @@ def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str:
     storage_wallet_path = node.get_wallet_path()
 
     cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, config_file=storage_wallet_config)
-    return cli.netmap.snapshot(
-        rpc_endpoint=node.get_rpc_endpoint(),
-        wallet=storage_wallet_path,
-    ).stdout
+    return cli.netmap.snapshot(rpc_endpoint=node.get_rpc_endpoint(), wallet=storage_wallet_path).stdout
 
 
-@reporter.step_deco("Get shard list for {node}")
-def node_shard_list(node: StorageNode) -> list[str]:
+@reporter.step("Get shard list for {node}")
+def node_shard_list(node: StorageNode, json: Optional[bool] = None) -> list[str]:
     """
     The function returns list of shards for specified storage node.
     Args:
@@ -142,112 +124,139 @@ def node_shard_list(node: StorageNode) -> list[str]:
     Returns:
         list of shards.
     """
-    command = "control shards list"
-    output = _run_control_command_with_retries(node, command)
-    return re.findall(r"Shard (.*):", output)
+    host = node.host
+    service_config = host.get_service_config(node.name)
+    wallet_path = service_config.attributes["wallet_path"]
+    wallet_password = service_config.attributes["wallet_password"]
+    control_endpoint = service_config.attributes["control_endpoint"]
+
+    shell = host.get_shell()
+    wallet_config_path = f"/tmp/{node.name}-config.yaml"
+    wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
+    shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
+
+    cli_config = host.get_cli_config("frostfs-cli")
+
+    cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
+    result = cli.shards.list(endpoint=control_endpoint, json_mode=json)
+
+    return re.findall(r"Shard (.*):", result.stdout)
 
 
-@reporter.step_deco("Shard set for {node}")
-def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str:
+@reporter.step("Shard set for {node}")
+def node_shard_set_mode(node: StorageNode, shard: list[str], mode: str) -> None:
     """
     The function sets mode for specified shard.
     Args:
         node: node on which shard mode should be set.
     """
-    command = f"control shards set-mode --id {shard} --mode {mode}"
-    return _run_control_command_with_retries(node, command)
+    host = node.host
+    service_config = host.get_service_config(node.name)
+    wallet_path = service_config.attributes["wallet_path"]
+    wallet_password = service_config.attributes["wallet_password"]
+    control_endpoint = service_config.attributes["control_endpoint"]
+
+    shell = host.get_shell()
+    wallet_config_path = f"/tmp/{node.name}-config.yaml"
+    wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
+    shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
+
+    cli_config = host.get_cli_config("frostfs-cli")
+
+    cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
+    cli.shards.set_mode(endpoint=control_endpoint, mode=mode, id=shard)
 
 
-@reporter.step_deco("Drop object from {node}")
-def drop_object(node: StorageNode, cid: str, oid: str) -> str:
+@reporter.step("Drop object from {node}")
+def drop_object(node: StorageNode, cid: str, oid: str) -> None:
     """
     The function drops object from specified node.
     Args:
-        node_id str: node from which object should be dropped.
+        node: node from which object should be dropped.
     """
-    command = f"control drop-objects -o {cid}/{oid}"
-    return _run_control_command_with_retries(node, command)
+    host = node.host
+    service_config = host.get_service_config(node.name)
+    wallet_path = service_config.attributes["wallet_path"]
+    wallet_password = service_config.attributes["wallet_password"]
+    control_endpoint = service_config.attributes["control_endpoint"]
+
+    shell = host.get_shell()
+    wallet_config_path = f"/tmp/{node.name}-config.yaml"
+    wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
+    shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
+
+    cli_config = host.get_cli_config("frostfs-cli")
+
+    cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
+    objects = f"{cid}/{oid}"
+    cli.control.drop_objects(control_endpoint, objects)
-@reporter.step_deco("Delete data from host for node {node}")
+@reporter.step("Delete data from host for node {node}")
 def delete_node_data(node: StorageNode) -> None:
     node.stop_service()
     node.host.delete_storage_node_data(node.name)
     time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
 
 
-@reporter.step_deco("Exclude node {node_to_exclude} from network map")
-def exclude_node_from_network_map(
-    node_to_exclude: StorageNode,
-    alive_node: StorageNode,
-    shell: Shell,
-    cluster: Cluster,
-) -> None:
+@reporter.step("Exclude node {node_to_exclude} from network map")
+def exclude_node_from_network_map(node_to_exclude: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None:
     node_netmap_key = node_to_exclude.get_wallet_public_key()
 
     storage_node_set_status(node_to_exclude, status="offline")
 
     time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
     tick_epoch(shell, cluster)
+    wait_for_epochs_align(shell, cluster)
 
     snapshot = get_netmap_snapshot(node=alive_node, shell=shell)
-    assert (
-        node_netmap_key not in snapshot
-    ), f"Expected node with key {node_netmap_key} to be absent in network map"
+    assert node_netmap_key not in snapshot, f"Expected node with key {node_netmap_key} to be absent in network map"
 
 
-@reporter.step_deco("Include node {node_to_include} into network map")
-def include_node_to_network_map(
-    node_to_include: StorageNode,
-    alive_node: StorageNode,
-    shell: Shell,
-    cluster: Cluster,
-) -> None:
+@reporter.step("Include node {node_to_include} into network map")
+def include_node_to_network_map(node_to_include: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None:
     storage_node_set_status(node_to_include, status="online")
 
     # Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch.
-    # First sleep can be omitted after https://github.com/TrueCloudLab/frostfs-node/issues/60 complete.
+    # First sleep can be omitted after https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/60 complete.
 
     time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
     tick_epoch(shell, cluster)
     time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
 
-    check_node_in_map(node_to_include, shell, alive_node)
+    await_node_in_map(node_to_include, shell, alive_node)
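Exclusion now waits for all nodes to agree on the epoch (wait_for_epochs_align) before asserting on the netmap, and inclusion polls through await_node_in_map instead of a single check, which removes a class of flaky failures right after an epoch tick. Sketch of the typical maintenance round trip in a test (fixture names are assumptions):

    # Sketch only: take a node out of the map and bring it back.
    exclude_node_from_network_map(node, alive_node, shell, cluster)
    # ... run the scenario that requires a shrunken netmap ...
    include_node_to_network_map(node, alive_node, shell, cluster)
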
-@reporter.step_deco("Check node {node} in network map")
-def check_node_in_map(
-    node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None
-) -> None:
+@reporter.step("Check node {node} in network map")
+def check_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None:
     alive_node = alive_node or node
 
     node_netmap_key = node.get_wallet_public_key()
     logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}")
 
     snapshot = get_netmap_snapshot(alive_node, shell)
-    assert (
-        node_netmap_key in snapshot
-    ), f"Expected node with key {node_netmap_key} to be in network map"
+    assert node_netmap_key in snapshot, f"Expected node with key {node_netmap_key} to be in network map"
 
 
-@reporter.step_deco("Check node {node} NOT in network map")
-def check_node_not_in_map(
-    node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None
-) -> None:
+@wait_for_success(300, 15, title="Await node {node} in network map")
+def await_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None:
+    check_node_in_map(node, shell, alive_node)
+
+
+@reporter.step("Check node {node} NOT in network map")
+def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None:
     alive_node = alive_node or node
 
     node_netmap_key = node.get_wallet_public_key()
     logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}")
 
     snapshot = get_netmap_snapshot(alive_node, shell)
-    assert (
-        node_netmap_key not in snapshot
-    ), f"Expected node with key {node_netmap_key} to be NOT in network map"
+    assert node_netmap_key not in snapshot, f"Expected node with key {node_netmap_key} to be NOT in network map"
 
 
-@reporter.step_deco("Wait for node {node} is ready")
+@reporter.step("Wait for node {node} is ready")
 def wait_for_node_to_be_ready(node: StorageNode) -> None:
-    timeout, attempts = 30, 6
+    timeout, attempts = 60, 15
     for _ in range(attempts):
         try:
             health_check = storage_node_healthcheck(node)
@@ -256,18 +265,11 @@ def wait_for_node_to_be_ready(node: StorageNode) -> None:
         except Exception as err:
             logger.warning(f"Node {node} is not ready:\n{err}")
         sleep(timeout)
-    raise AssertionError(
-        f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds"
-    )
+    raise AssertionError(f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds")
 
 
-@reporter.step_deco("Remove nodes from network map trough cli-adm morph command")
-def remove_nodes_from_map_morph(
-    shell: Shell,
-    cluster: Cluster,
-    remove_nodes: list[StorageNode],
-    alive_node: Optional[StorageNode] = None,
-):
+@reporter.step("Remove nodes from network map trough cli-adm morph command")
+def remove_nodes_from_map_morph(shell: Shell, cluster: Cluster, remove_nodes: list[StorageNode], alive_node: Optional[StorageNode] = None):
     """
     Move node to the Offline state in the candidates list and tick an epoch to update the netmap
     using frostfs-adm
@@ -286,66 +288,5 @@ def remove_nodes_from_map_morph(
 
     if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH:
         # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests)
-        frostfsadm = FrostfsAdm(
-            shell=remote_shell,
-            frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
-            config_file=FROSTFS_ADM_CONFIG_PATH,
-        )
+        frostfsadm = FrostfsAdm(shell=remote_shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH)
         frostfsadm.morph.remove_nodes(node_netmap_keys)
-
-
-def _run_control_command_with_retries(node: StorageNode, command: str, retries: int = 0) -> str:
-    for attempt in range(1 + retries):  # original attempt + specified retries
-        try:
-            return _run_control_command(node, command)
-        except AssertionError as err:
-            if attempt < retries:
-                logger.warning(f"Command {command} failed with error {err} and will be retried")
-                continue
-            raise AssertionError(f"Command {command} failed with error {err}") from err
-
-
-def _run_control_command(node: StorageNode, command: str) -> None:
-    host = node.host
-
-    service_config = host.get_service_config(node.name)
-    wallet_path = service_config.attributes["wallet_path"]
-    wallet_password = service_config.attributes["wallet_password"]
-    control_endpoint = service_config.attributes["control_endpoint"]
-
-    shell = host.get_shell()
-    wallet_config_path = f"/tmp/{node.name}-config.yaml"
-    wallet_config = f'password: "{wallet_password}"'
-    shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
-
-    cli_config = host.get_cli_config("frostfs-cli")
-
-    # TODO: implement cli.control
-    # cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
-    result = shell.exec(
-        f"{cli_config.exec_path} {command} --endpoint {control_endpoint} "
-        f"--wallet {wallet_path} --config {wallet_config_path}"
-    )
-    return result.stdout
-
-
-@reporter.step_deco("Start services s3gate ")
-def start_s3gates(cluster: Cluster) -> None:
-    """
-    The function starts specified storage nodes.
-    Args:
-        cluster: cluster instance under test
-    """
-    for gate in cluster.services(S3Gate):
-        gate.start_service()
-
-
-@reporter.step_deco("Stop services s3gate ")
-def stop_s3gates(cluster: Cluster) -> None:
-    """
-    The function starts specified storage nodes.
-    Args:
-        cluster: cluster instance under test
-    """
-    for gate in cluster.services(S3Gate):
-        gate.stop_service()
src/frostfs_testlib/steps/payment_neogo.py
@@ -8,20 +8,18 @@ from typing import Optional
 from neo3.wallet import utils as neo3_utils
 from neo3.wallet import wallet as neo3_wallet
 
+from frostfs_testlib import reporter
 from frostfs_testlib.cli import NeoGo
-from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.cli import NEOGO_EXECUTABLE
 from frostfs_testlib.resources.common import FROSTFS_CONTRACT, GAS_HASH, MORPH_BLOCK_TIME
 from frostfs_testlib.shell import Shell
-from frostfs_testlib.storage.dataclasses.frostfs_services import MainChain, MorphChain
+from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain
 from frostfs_testlib.utils import converting_utils, datetime_utils, wallet_utils
 
-reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")
 
 EMPTY_PASSWORD = ""
 TX_PERSIST_TIMEOUT = 15  # seconds
-ASSET_POWER_MAINCHAIN = 10**8
 ASSET_POWER_SIDECHAIN = 10**12
@@ -42,32 +40,7 @@ def get_contract_hash(morph_chain: MorphChain, resolve_name: str, shell: Shell)
     return bytes.decode(base64.b64decode(stack_data[0]["value"]))
 
 
-@reporter.step_deco("Withdraw Mainnet Gas")
-def withdraw_mainnet_gas(shell: Shell, main_chain: MainChain, wlt: str, amount: int):
-    address = wallet_utils.get_last_address_from_wallet(wlt, EMPTY_PASSWORD)
-    scripthash = neo3_utils.address_to_script_hash(address)
-
-    neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE)
-    out = neogo.contract.invokefunction(
-        wallet=wlt,
-        address=address,
-        rpc_endpoint=main_chain.get_endpoint(),
-        scripthash=FROSTFS_CONTRACT,
-        method="withdraw",
-        arguments=f"{scripthash} int:{amount}",
-        multisig_hash=f"{scripthash}:Global",
-        wallet_password="",
-    )
-
-    m = re.match(r"^Sent invocation transaction (\w{64})$", out.stdout)
-    if m is None:
-        raise Exception("Can not get Tx.")
-    tx = m.group(1)
-    if not transaction_accepted(main_chain, tx):
-        raise AssertionError(f"TX {tx} hasn't been processed")
-
-
-def transaction_accepted(main_chain: MainChain, tx_id: str):
+def transaction_accepted(morph_chain: MorphChain, tx_id: str):
     """
     This function returns True in case of accepted TX.
     Args:
@@ -79,7 +52,8 @@ def transaction_accepted(main_chain: MainChain, tx_id: str):
     try:
         for _ in range(0, TX_PERSIST_TIMEOUT):
             time.sleep(1)
-            resp = main_chain.rpc_client.get_transaction_height(tx_id)
+            neogo = NeoGo(shell=morph_chain.host.get_shell(), neo_go_exec_path=NEOGO_EXECUTABLE)
+            resp = neogo.query.tx(tx_hash=tx_id, rpc_endpoint=morph_chain.get_endpoint())
             if resp is not None:
                 logger.info(f"TX is accepted in block: {resp}")
                 return True, resp
@@ -89,7 +63,7 @@ def transaction_accepted(main_chain: MainChain, tx_id: str):
     return False
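With the mainchain wiring removed, acceptance polling now goes through neo-go query tx on the node's own shell instead of an RPC-client call. Note also that on success the function returns the tuple (True, resp) rather than a bare bool, so callers should rely on truthiness only, as the callers in this file already do:

    # Sketch: truthiness works for both return shapes, `is True` would not.
    if not transaction_accepted(morph_chain, txid):
        raise AssertionError(f"TX {txid} hasn't been processed")
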
-@reporter.step_deco("Get FrostFS Balance")
+@reporter.step("Get FrostFS Balance")
 def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_password: str = ""):
     """
     This function returns FrostFS balance for given wallet.
@@ -110,11 +84,11 @@ def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_
         raise out
 
 
-@reporter.step_deco("Transfer Gas")
+@reporter.step("Transfer Gas")
 def transfer_gas(
     shell: Shell,
     amount: int,
-    main_chain: MainChain,
+    morph_chain: MorphChain,
     wallet_from_path: Optional[str] = None,
     wallet_from_password: Optional[str] = None,
     address_from: Optional[str] = None,
@@ -137,22 +111,16 @@ def transfer_gas(
         address_to: The address of the wallet to transfer assets to.
         amount: Amount of gas to transfer.
     """
-    wallet_from_path = wallet_from_path or main_chain.get_wallet_path()
+    wallet_from_path = wallet_from_path or morph_chain.get_wallet_path()
     wallet_from_password = (
-        wallet_from_password
-        if wallet_from_password is not None
-        else main_chain.get_wallet_password()
+        wallet_from_password if wallet_from_password is not None else morph_chain.get_wallet_password()
     )
-    address_from = address_from or wallet_utils.get_last_address_from_wallet(
-        wallet_from_path, wallet_from_password
-    )
-    address_to = address_to or wallet_utils.get_last_address_from_wallet(
-        wallet_to_path, wallet_to_password
-    )
+    address_from = address_from or wallet_utils.get_last_address_from_wallet(wallet_from_path, wallet_from_password)
+    address_to = address_to or wallet_utils.get_last_address_from_wallet(wallet_to_path, wallet_to_password)
 
     neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE)
     out = neogo.nep17.transfer(
-        rpc_endpoint=main_chain.get_endpoint(),
+        rpc_endpoint=morph_chain.get_endpoint(),
         wallet=wallet_from_path,
         wallet_password=wallet_from_password,
         amount=amount,
@@ -164,50 +132,12 @@ def transfer_gas(
     txid = out.stdout.strip().split("\n")[-1]
     if len(txid) != 64:
         raise Exception("Got no TXID after run the command")
-    if not transaction_accepted(main_chain, txid):
+    if not transaction_accepted(morph_chain, txid):
        raise AssertionError(f"TX {txid} hasn't been processed")
     time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
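transfer_gas is what test setups use to fund throwaway wallets; with the mainchain path gone it defaults to the morph chain's own wallet as the source. Usage sketch (fixture and attribute names are assumptions):

    # Sketch only: fund a fresh test wallet on the sidechain.
    transfer_gas(
        shell=shell,
        amount=1,
        morph_chain=morph_chain,
        wallet_to_path=test_wallet.path,
        wallet_to_password=test_wallet.password,
    )
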
-@reporter.step_deco("FrostFS Deposit")
-def deposit_gas(
-    shell: Shell,
-    main_chain: MainChain,
-    amount: int,
-    wallet_from_path: str,
-    wallet_from_password: str,
-):
-    """
-    Transferring GAS from given wallet to FrostFS contract address.
-    """
-    # get FrostFS contract address
-    deposit_addr = converting_utils.contract_hash_to_address(FROSTFS_CONTRACT)
-    logger.info(f"FrostFS contract address: {deposit_addr}")
-    address_from = wallet_utils.get_last_address_from_wallet(
-        wallet_path=wallet_from_path, wallet_password=wallet_from_password
-    )
-    transfer_gas(
-        shell=shell,
-        main_chain=main_chain,
-        amount=amount,
-        wallet_from_path=wallet_from_path,
-        wallet_from_password=wallet_from_password,
-        address_to=deposit_addr,
-        address_from=address_from,
-    )
-
-
-@reporter.step_deco("Get Mainnet Balance")
-def get_mainnet_balance(main_chain: MainChain, address: str):
-    resp = main_chain.rpc_client.get_nep17_balances(address=address)
-    logger.info(f"Got getnep17balances response: {resp}")
-    for balance in resp["balance"]:
-        if balance["assethash"] == GAS_HASH:
-            return float(balance["amount"]) / ASSET_POWER_MAINCHAIN
-    return float(0)
-
-
-@reporter.step_deco("Get Sidechain Balance")
+@reporter.step("Get Sidechain Balance")
 def get_sidechain_balance(morph_chain: MorphChain, address: str):
     resp = morph_chain.rpc_client.get_nep17_balances(address=address)
     logger.info(f"Got getnep17balances response: {resp}")
src/frostfs_testlib/steps/s3/s3_helper.py
@@ -1,26 +1,22 @@
-import json
 import logging
 import os
-import re
-import uuid
 from datetime import datetime, timedelta
 from typing import Optional
 
 from dateutil.parser import parse
 
-from frostfs_testlib.reporter import get_reporter
-from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
-from frostfs_testlib.resources.common import CREDENTIALS_CREATE_TIMEOUT
+from frostfs_testlib import reporter
 from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
-from frostfs_testlib.storage.cluster import Cluster
-from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
-from frostfs_testlib.utils.cli_utils import _run_with_passwd
+from frostfs_testlib.s3.interfaces import BucketContainerResolver
+from frostfs_testlib.shell import Shell
+from frostfs_testlib.steps.cli.container import search_nodes_with_container
+from frostfs_testlib.storage.cluster import Cluster, ClusterNode
+from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 
-reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")
 
 
-@reporter.step_deco("Expected all objects are presented in the bucket")
+@reporter.step("Expected all objects are presented in the bucket")
 def check_objects_in_bucket(
     s3_client: S3ClientWrapper,
     bucket: str,
@@ -29,37 +25,29 @@ def check_objects_in_bucket(
 ) -> None:
     unexpected_objects = unexpected_objects or []
     bucket_objects = s3_client.list_objects(bucket)
-    assert len(bucket_objects) == len(
-        expected_objects
-    ), f"Expected {len(expected_objects)} objects in the bucket"
+    assert len(bucket_objects) == len(expected_objects), f"Expected {len(expected_objects)} objects in the bucket"
     for bucket_object in expected_objects:
-        assert (
-            bucket_object in bucket_objects
-        ), f"Expected object {bucket_object} in objects list {bucket_objects}"
+        assert bucket_object in bucket_objects, f"Expected object {bucket_object} in objects list {bucket_objects}"
 
     for bucket_object in unexpected_objects:
-        assert (
-            bucket_object not in bucket_objects
-        ), f"Expected object {bucket_object} not in objects list {bucket_objects}"
+        assert bucket_object not in bucket_objects, f"Expected object {bucket_object} not in objects list {bucket_objects}"
 
 
-@reporter.step_deco("Try to get object and got error")
-def try_to_get_objects_and_expect_error(
-    s3_client: S3ClientWrapper, bucket: str, object_keys: list
-) -> None:
+@reporter.step("Try to get object and got error")
+def try_to_get_objects_and_expect_error(s3_client: S3ClientWrapper, bucket: str, object_keys: list) -> None:
     for obj in object_keys:
         try:
             s3_client.get_object(bucket, obj)
             raise AssertionError(f"Object {obj} found in bucket {bucket}")
         except Exception as err:
-            assert "The specified key does not exist" in str(
-                err
-            ), f"Expected error in exception {err}"
+            assert "The specified key does not exist" in str(err), f"Expected error in exception {err}"
 
 
-@reporter.step_deco("Set versioning status to '{status}' for bucket '{bucket}'")
+@reporter.step("Set versioning status to '{status}' for bucket '{bucket}'")
 def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: VersioningStatus):
-    s3_client.get_bucket_versioning_status(bucket)
+    if status == VersioningStatus.UNDEFINED:
+        return
+
     s3_client.put_bucket_versioning(bucket, status=status)
     bucket_status = s3_client.get_bucket_versioning_status(bucket)
     assert bucket_status == status.value, f"Expected {bucket_status} status. Got {status.value}"
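set_bucket_versioning now treats VersioningStatus.UNDEFINED as a no-op instead of probing the current status first, so parametrized tests can pass UNDEFINED to mean "leave the bucket as created". Sketch:

    # Sketch only: when the parametrized status is UNDEFINED, the PUT is skipped.
    set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
    check_objects_in_bucket(s3_client, bucket, expected_objects=[key])
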
||||||
|
@@ -69,15 +57,9 @@ def object_key_from_file_path(full_path: str) -> str:
     return os.path.basename(full_path)
 
 
-def assert_tags(
-    actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None
-) -> None:
-    expected_tags = (
-        [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else []
-    )
-    unexpected_tags = (
-        [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else []
-    )
+def assert_tags(actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None) -> None:
+    expected_tags = [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else []
+    unexpected_tags = [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else []
     if expected_tags == []:
         assert not actual_tags, f"Expected there is no tags, got {actual_tags}"
     assert len(expected_tags) == len(actual_tags)
@@ -87,7 +69,7 @@ def assert_tags(
         assert tag not in actual_tags, f"Tag {tag} should not be in {actual_tags}"
 
 
-@reporter.step_deco("Expected all tags are presented in object")
+@reporter.step("Expected all tags are presented in object")
 def check_tags_by_object(
     s3_client: S3ClientWrapper,
     bucket: str,
@@ -96,12 +78,10 @@ def check_tags_by_object(
     unexpected_tags: Optional[list] = None,
 ) -> None:
     actual_tags = s3_client.get_object_tagging(bucket, key)
-    assert_tags(
-        expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags
-    )
+    assert_tags(expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags)
 
 
-@reporter.step_deco("Expected all tags are presented in bucket")
+@reporter.step("Expected all tags are presented in bucket")
 def check_tags_by_bucket(
     s3_client: S3ClientWrapper,
     bucket: str,
@@ -109,9 +89,7 @@ def check_tags_by_bucket(
     unexpected_tags: Optional[list] = None,
 ) -> None:
     actual_tags = s3_client.get_bucket_tagging(bucket)
-    assert_tags(
-        expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags
-    )
+    assert_tags(expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags)
 
 
 def assert_object_lock_mode(
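For reference, assert_tags and the check_tags_by_* helpers in the hunks above take plain (key, value) pairs and convert them to the AWS {"Key": ..., "Value": ...} shape before comparing. A hedged sketch (s3_client, bucket and file_key are assumed to exist in the test; put_object_tagging is assumed to be the wrapper's tagging call):

    tags = [("project", "frostfs"), ("env", "dev")]
    s3_client.put_object_tagging(bucket, file_key, tags)

    # Passes only if exactly these tags are present on the object.
    check_tags_by_object(s3_client, bucket, file_key, expected_tags=tags)

    # unexpected_tags asserts absence of the listed pairs.
    check_tags_by_object(s3_client, bucket, file_key, expected_tags=tags, unexpected_tags=[("tmp", "1")])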
@@ -124,105 +102,49 @@ def assert_object_lock_mode(
     retain_period: Optional[int] = None,
 ):
     object_dict = s3_client.get_object(bucket, file_name, full_output=True)
-    assert (
-        object_dict.get("ObjectLockMode") == object_lock_mode
-    ), f"Expected Object Lock Mode is {object_lock_mode}"
+    assert object_dict.get("ObjectLockMode") == object_lock_mode, f"Expected Object Lock Mode is {object_lock_mode}"
     assert (
         object_dict.get("ObjectLockLegalHoldStatus") == legal_hold_status
     ), f"Expected Object Lock Legal Hold Status is {legal_hold_status}"
     object_retain_date = object_dict.get("ObjectLockRetainUntilDate")
-    retain_date = (
-        parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date
-    )
+    retain_date = parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date
     if retain_until_date:
         assert retain_date.strftime("%Y-%m-%dT%H:%M:%S") == retain_until_date.strftime(
             "%Y-%m-%dT%H:%M:%S"
         ), f'Expected Object Lock Retain Until Date is {str(retain_until_date.strftime("%Y-%m-%dT%H:%M:%S"))}'
     elif retain_period:
         last_modify_date = object_dict.get("LastModified")
-        last_modify = (
-            parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date
-        )
+        last_modify = parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date
         assert (
             retain_date - last_modify + timedelta(seconds=1)
         ).days == retain_period, f"Expected retention period is {retain_period} days"
 
 
-def assert_s3_acl(acl_grants: list, permitted_users: str):
-    if permitted_users == "AllUsers":
-        grantees = {"AllUsers": 0, "CanonicalUser": 0}
-        for acl_grant in acl_grants:
-            if acl_grant.get("Grantee", {}).get("Type") == "Group":
-                uri = acl_grant.get("Grantee", {}).get("URI")
-                permission = acl_grant.get("Permission")
-                assert (uri, permission) == (
-                    "http://acs.amazonaws.com/groups/global/AllUsers",
-                    "FULL_CONTROL",
-                ), "All Groups should have FULL_CONTROL"
-                grantees["AllUsers"] += 1
-            if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
-                permission = acl_grant.get("Permission")
-                assert permission == "FULL_CONTROL", "Canonical User should have FULL_CONTROL"
-                grantees["CanonicalUser"] += 1
-        assert grantees["AllUsers"] >= 1, "All Users should have FULL_CONTROL"
-        assert grantees["CanonicalUser"] >= 1, "Canonical User should have FULL_CONTROL"
-
-    if permitted_users == "CanonicalUser":
-        for acl_grant in acl_grants:
-            if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
-                permission = acl_grant.get("Permission")
-                assert permission == "FULL_CONTROL", "Only CanonicalUser should have FULL_CONTROL"
-            else:
-                logger.error("FULL_CONTROL is given to All Users")
-
-
-@reporter.step_deco("Init S3 Credentials")
-def init_s3_credentials(
-    wallet_path: str,
-    cluster: Cluster,
-    s3_bearer_rules_file: str,
-    policy: Optional[dict] = None,
-):
-    bucket = str(uuid.uuid4())
-
-    s3gate_node = cluster.services(S3Gate)[0]
-    gate_public_key = s3gate_node.get_wallet_public_key()
-    cmd = (
-        f"{FROSTFS_AUTHMATE_EXEC} --debug --with-log --timeout {CREDENTIALS_CREATE_TIMEOUT} "
-        f"issue-secret --wallet {wallet_path} --gate-public-key={gate_public_key} "
-        f"--peer {cluster.default_rpc_endpoint} --container-friendly-name {bucket} "
-        f"--bearer-rules {s3_bearer_rules_file}"
-    )
-    if policy:
-        cmd += f" --container-policy {policy}'"
-    logger.info(f"Executing command: {cmd}")
-
-    try:
-        output = _run_with_passwd(cmd)
-        logger.info(f"Command completed with output: {output}")
-
-        # output contains some debug info and then several JSON structures, so we find each
-        # JSON structure by curly brackets (naive approach, but works while JSON is not nested)
-        # and then we take JSON containing secret_access_key
-        json_blocks = re.findall(r"\{.*?\}", output, re.DOTALL)
-        for json_block in json_blocks:
-            try:
-                parsed_json_block = json.loads(json_block)
-                if "secret_access_key" in parsed_json_block:
-                    return (
-                        parsed_json_block["container_id"],
-                        parsed_json_block["access_key_id"],
-                        parsed_json_block["secret_access_key"],
-                    )
-            except json.JSONDecodeError:
-                raise AssertionError(f"Could not parse info from output\n{output}")
-        raise AssertionError(f"Could not find AWS credentials in output:\n{output}")
-
-    except Exception as exc:
-        raise RuntimeError(f"Failed to init s3 credentials because of error\n{exc}") from exc
-
-
-@reporter.step_deco("Delete bucket with all objects")
+def _format_grants_as_strings(grants: list[dict]) -> list:
+    grantee_format = "{g_type}::{uri}:{permission}"
+    return set(
+        [
+            grantee_format.format(
+                g_type=grant.get("Grantee", {}).get("Type", ""),
+                uri=grant.get("Grantee", {}).get("URI", ""),
+                permission=grant.get("Permission", ""),
+            )
+            for grant in grants
+        ]
+    )
+
+
+@reporter.step("Verify ACL permissions")
+def verify_acl_permissions(actual_acl_grants: list[dict], expected_acl_grants: list[dict], strict: bool = True):
+    actual_grants = _format_grants_as_strings(actual_acl_grants)
+    expected_grants = _format_grants_as_strings(expected_acl_grants)
+
+    assert expected_grants <= actual_grants, "Permissions mismatch"
+    if strict:
+        assert expected_grants == actual_grants, "Extra permissions found, must not be there"
+
+
+@reporter.step("Delete bucket with all objects")
 def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str):
     versioning_status = s3_client.get_bucket_versioning_status(bucket)
     if versioning_status == VersioningStatus.ENABLED.value:
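The new verify_acl_permissions replaces the hard-coded assert_s3_acl: grants are flattened to "Type::URI:Permission" strings and compared as sets, so each test can declare its own expected grants instead of choosing between the two canned modes. A sketch with a hand-written expectation (the acl value is assumed to be the "Grants" list returned by the wrapper's get_bucket_acl/get_object_acl):

    expected = [
        {
            "Grantee": {"Type": "Group", "URI": "http://acs.amazonaws.com/groups/global/AllUsers"},
            "Permission": "FULL_CONTROL",
        }
    ]
    # strict=True (the default) also fails on any grant beyond the expected set.
    verify_acl_permissions(acl, expected, strict=True)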
@@ -245,3 +167,21 @@ def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str):
 
     # Delete the bucket itself
     s3_client.delete_bucket(bucket)
+
+
+@reporter.step("Search nodes bucket")
+def search_nodes_with_bucket(
+    cluster: Cluster,
+    bucket_name: str,
+    wallet: WalletInfo,
+    shell: Shell,
+    endpoint: str,
+    bucket_container_resolver: BucketContainerResolver,
+) -> list[ClusterNode]:
+    cid = None
+    for cluster_node in cluster.cluster_nodes:
+        cid = bucket_container_resolver.resolve(cluster_node, bucket_name)
+        if cid:
+            break
+    nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster)
+    return nodes_list
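search_nodes_with_bucket resolves the bucket to its container id through the injected BucketContainerResolver and then delegates to search_nodes_with_container. Assumed call shape (fixture names are illustrative):

    nodes = search_nodes_with_bucket(
        cluster=cluster,
        bucket_name=bucket,
        wallet=default_wallet,
        shell=client_shell,
        endpoint=cluster.default_rpc_endpoint,
        bucket_container_resolver=bucket_container_resolver,
    )
    assert nodes, "Bucket container should be stored on at least one node"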
@@ -4,19 +4,18 @@ import logging
 import os
 import uuid
 from dataclasses import dataclass
-from enum import Enum
 from typing import Any, Optional
 
+from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsCli
-from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
-from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
+from frostfs_testlib.resources.common import ASSETS_DIR
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+from frostfs_testlib.testing.readable import HumanReadableEnum
 from frostfs_testlib.utils import json_utils, wallet_utils
 
-reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")
 
 UNRELATED_KEY = "unrelated key in the session"
@@ -26,7 +25,7 @@ WRONG_VERB = "wrong verb of the session"
 INVALID_SIGNATURE = "invalid signature of the session data"
 
 
-class ObjectVerb(Enum):
+class ObjectVerb(HumanReadableEnum):
     PUT = "PUT"
     DELETE = "DELETE"
     GET = "GET"
@@ -36,7 +35,7 @@ class ObjectVerb(Enum):
     SEARCH = "SEARCH"
 
 
-class ContainerVerb(Enum):
+class ContainerVerb(HumanReadableEnum):
     CREATE = "PUT"
     DELETE = "DELETE"
     SETEACL = "SETEACL"
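Moving ObjectVerb/ContainerVerb from Enum to HumanReadableEnum leaves the wire values untouched; it only changes how members render, e.g. in parametrized test ids and step titles. A self-contained sketch of the assumed behaviour (the real class lives in frostfs_testlib.testing.readable and may differ in detail):

    from enum import Enum


    class HumanReadableEnum(Enum):
        # Assumed behaviour: render the raw value instead of ClassName.MEMBER.
        def __str__(self) -> str:
            return str(self._value_)

        def __repr__(self) -> str:
            return str(self._value_)


    class ContainerVerb(HumanReadableEnum):
        CREATE = "PUT"


    print(ContainerVerb.CREATE)  # prints "PUT" rather than "ContainerVerb.CREATE"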
@@ -49,7 +48,7 @@ class Lifetime:
     iat: int = 0
 
 
-@reporter.step_deco("Generate Session Token")
+@reporter.step("Generate Session Token")
 def generate_session_token(
     owner_wallet: WalletInfo,
     session_wallet: WalletInfo,
@@ -71,9 +70,7 @@ def generate_session_token(
 
     file_path = os.path.join(tokens_dir, str(uuid.uuid4()))
 
-    pub_key_64 = wallet_utils.get_wallet_public_key(
-        session_wallet.path, session_wallet.password, "base64"
-    )
+    pub_key_64 = wallet_utils.get_wallet_public_key(session_wallet.path, session_wallet.password, "base64")
 
     lifetime = lifetime or Lifetime()
 
@@ -98,7 +95,7 @@ def generate_session_token(
     return file_path
 
 
-@reporter.step_deco("Generate Session Token For Container")
+@reporter.step("Generate Session Token For Container")
 def generate_container_session_token(
     owner_wallet: WalletInfo,
     session_wallet: WalletInfo,
@@ -125,11 +122,7 @@ def generate_container_session_token(
             "container": {
                 "verb": verb.value,
                 "wildcard": cid is None,
-                **(
-                    {"containerID": {"value": f"{json_utils.encode_for_json(cid)}"}}
-                    if cid is not None
-                    else {}
-                ),
+                **({"containerID": {"value": f"{json_utils.encode_for_json(cid)}"}} if cid is not None else {}),
             },
         }
 
@@ -142,7 +135,7 @@ def generate_container_session_token(
     )
 
 
-@reporter.step_deco("Generate Session Token For Object")
+@reporter.step("Generate Session Token For Object")
 def generate_object_session_token(
     owner_wallet: WalletInfo,
     session_wallet: WalletInfo,
@@ -184,7 +177,7 @@ def generate_object_session_token(
     )
 
 
-@reporter.step_deco("Get signed token for container session")
+@reporter.step("Get signed token for container session")
 def get_container_signed_token(
     owner_wallet: WalletInfo,
     user_wallet: WalletInfo,
@@ -206,7 +199,7 @@ def get_container_signed_token(
     return sign_session_token(shell, session_token_file, owner_wallet)
 
 
-@reporter.step_deco("Get signed token for object session")
+@reporter.step("Get signed token for object session")
 def get_object_signed_token(
     owner_wallet: WalletInfo,
     user_wallet: WalletInfo,
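The collapsed **({...} if cid is not None else {}) line in generate_container_session_token is the usual conditional dict-unpacking idiom: unpacking an empty dict adds no keys, so "containerID" appears in the token body only when a concrete container is targeted. Self-contained illustration:

    cid = None
    body = {
        "verb": "PUT",
        "wildcard": cid is None,
        **({"containerID": {"value": cid}} if cid is not None else {}),
    }
    assert "containerID" not in body  # omitted entirely in the wildcard case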
@@ -233,12 +226,11 @@ def get_object_signed_token(
     return sign_session_token(shell, session_token_file, owner_wallet)
 
 
-@reporter.step_deco("Create Session Token")
+@reporter.step("Create Session Token")
 def create_session_token(
     shell: Shell,
     owner: str,
-    wallet_path: str,
-    wallet_password: str,
+    wallet: WalletInfo,
     rpc_endpoint: str,
 ) -> str:
     """
@@ -253,19 +245,18 @@ def create_session_token(
         The path to the generated session token file.
     """
     session_token = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
-    frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC)
+    frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
     frostfscli.session.create(
         rpc_endpoint=rpc_endpoint,
         address=owner,
-        wallet=wallet_path,
-        wallet_password=wallet_password,
         out=session_token,
+        wallet=wallet.path,
     )
     return session_token
 
 
-@reporter.step_deco("Sign Session Token")
-def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) -> str:
+@reporter.step("Sign Session Token")
+def sign_session_token(shell: Shell, session_token_file: str, wallet: WalletInfo) -> str:
     """
     This function signs the session token by the given wallet.
 
@@ -278,10 +269,6 @@ def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) -> str:
         The path to the signed token.
     """
     signed_token_file = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
-    frostfscli = FrostfsCli(
-        shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
-    )
-    frostfscli.util.sign_session_token(
-        wallet=wlt.path, from_file=session_token_file, to_file=signed_token_file
-    )
+    frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
+    frostfscli.util.sign_session_token(session_token_file, signed_token_file)
     return signed_token_file
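create_session_token and sign_session_token now accept a WalletInfo and point the CLI at wallet.config_path, so the password no longer travels as a separate argument. Assumed call shape after this change (fixture names are illustrative; get_address is assumed to return the owner address):

    token_file = create_session_token(
        shell=shell,
        owner=default_wallet.get_address(),
        wallet=default_wallet,
        rpc_endpoint=cluster.default_rpc_endpoint,
    )
    signed_token_file = sign_session_token(shell, token_file, default_wallet)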
@@ -3,7 +3,7 @@ from time import sleep
 
 import pytest
 
-from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib import reporter
 from frostfs_testlib.resources.error_patterns import OBJECT_ALREADY_REMOVED
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.cli.object import delete_object, get_object
@@ -12,16 +12,13 @@ from frostfs_testlib.steps.tombstone import verify_head_tombstone
 from frostfs_testlib.storage.cluster import Cluster
 from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
 
-reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")
 
 CLEANUP_TIMEOUT = 10
 
 
-@reporter.step_deco("Delete Objects")
-def delete_objects(
-    storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster
-) -> None:
+@reporter.step("Delete Objects")
+def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster) -> None:
     """
     Deletes given storage objects.
 
@@ -33,14 +30,14 @@ def delete_objects(
     with reporter.step("Delete objects"):
         for storage_object in storage_objects:
             storage_object.tombstone = delete_object(
-                storage_object.wallet_file_path,
+                storage_object.wallet,
                 storage_object.cid,
                 storage_object.oid,
                 shell=shell,
                 endpoint=cluster.default_rpc_endpoint,
             )
             verify_head_tombstone(
-                wallet_path=storage_object.wallet_file_path,
+                wallet=storage_object.wallet,
                 cid=storage_object.cid,
                 oid_ts=storage_object.tombstone,
                 oid=storage_object.oid,
@@ -55,7 +52,7 @@ def delete_objects(
         for storage_object in storage_objects:
             with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED):
                 get_object(
-                    storage_object.wallet_file_path,
+                    storage_object.wallet,
                     storage_object.cid,
                     storage_object.oid,
                     shell=shell,
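The delete_objects hunk is part of the same migration: StorageObjectInfo now carries a WalletInfo in storage_object.wallet instead of a path in wallet_file_path, so the wallet config travels with the object. A hedged sketch of the calling side (constructor arguments are illustrative):

    storage_object = StorageObjectInfo(cid="<cid>", oid="<oid>")
    storage_object.wallet = default_wallet  # a WalletInfo, not a file path anymore

    delete_objects([storage_object], shell, cluster)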
@@ -6,22 +6,21 @@
 """
 import logging
 
-from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib import reporter
 from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.cli.object import head_object
 from frostfs_testlib.steps.complex_object_actions import get_last_object
 from frostfs_testlib.storage.cluster import StorageNode
+from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.utils import string_utils
 
-reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")
 
 
-@reporter.step_deco("Get Object Copies")
-def get_object_copies(
-    complexity: str, wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
-) -> int:
+# TODO: Unused, remove or make use of
+@reporter.step("Get Object Copies")
+def get_object_copies(complexity: str, wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
     """
     The function performs requests to all nodes of the container and
     finds out if they store a copy of the object. The procedure is
@@ -45,10 +44,8 @@ def get_object_copies(
     )
 
 
-@reporter.step_deco("Get Simple Object Copies")
-def get_simple_object_copies(
-    wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
-) -> int:
+@reporter.step("Get Simple Object Copies")
+def get_simple_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
     """
     To figure out the number of a simple object copies, only direct
     HEAD requests should be made to the every node of the container.
@@ -66,9 +63,7 @@ def get_simple_object_copies(
     copies = 0
     for node in nodes:
         try:
-            response = head_object(
-                wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True
-            )
+            response = head_object(wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True)
             if response:
                 logger.info(f"Found object {oid} on node {node}")
                 copies += 1
@@ -78,10 +73,8 @@ def get_simple_object_copies(
     return copies
 
 
-@reporter.step_deco("Get Complex Object Copies")
-def get_complex_object_copies(
-    wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
-) -> int:
+@reporter.step("Get Complex Object Copies")
+def get_complex_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
     """
     To figure out the number of a complex object copies, we firstly
     need to retrieve its Last object. We consider that the number of
@@ -102,10 +95,8 @@ def get_complex_object_copies(
     return get_simple_object_copies(wallet, cid, last_oid, shell, nodes)
 
 
-@reporter.step_deco("Get Nodes With Object")
-def get_nodes_with_object(
-    cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
-) -> list[StorageNode]:
+@reporter.step("Get Nodes With Object")
+def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> list[StorageNode]:
     """
     The function returns list of nodes which store
     the given object.
@@ -120,8 +111,7 @@ def get_nodes_with_object(
 
     nodes_list = []
     for node in nodes:
-        wallet = node.get_wallet_path()
-        wallet_config = node.get_wallet_config_path()
+        wallet = WalletInfo.from_node(node)
         try:
             res = head_object(
                 wallet,
@@ -130,7 +120,6 @@ def get_nodes_with_object(
                 shell=shell,
                 endpoint=node.get_rpc_endpoint(),
                 is_direct=True,
-                wallet_config=wallet_config,
             )
             if res is not None:
                 logger.info(f"Found object {oid} on node {node}")
@@ -141,10 +130,8 @@ def get_nodes_with_object(
     return nodes_list
 
 
-@reporter.step_deco("Get Nodes Without Object")
-def get_nodes_without_object(
-    wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
-) -> list[StorageNode]:
+@reporter.step("Get Nodes Without Object")
+def get_nodes_without_object(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> list[StorageNode]:
     """
     The function returns list of nodes which do not store
     the given object.
@@ -160,9 +147,7 @@ def get_nodes_without_object(
     nodes_list = []
     for node in nodes:
         try:
-            res = head_object(
-                wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True
-            )
+            res = head_object(wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True)
             if res is None:
                 nodes_list.append(node)
         except Exception as err:
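WalletInfo.from_node(node) bundles the node's wallet path, password and config path in one object, which is why the separate wallet_config argument to head_object could be dropped in get_nodes_with_object. Typical usage stays a one-liner (cluster and shell are assumed fixtures):

    nodes_with_copy = get_nodes_with_object(cid, oid, shell=shell, nodes=cluster.storage_nodes)
    for node in nodes_with_copy:
        logger.info(f"Object {oid} is stored on {node}")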
@@ -1,41 +1,24 @@
-import json
 import logging
 
-from neo3.wallet import wallet
-
-from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib import reporter
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.cli.object import head_object
+from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 
-reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")
 
 
-@reporter.step_deco("Verify Head Tombstone")
-def verify_head_tombstone(
-    wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str
-):
-    header = head_object(wallet_path, cid, oid_ts, shell=shell, endpoint=endpoint)["header"]
+@reporter.step("Verify Head Tombstone")
+def verify_head_tombstone(wallet: WalletInfo, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str):
+    header = head_object(wallet, cid, oid_ts, shell=shell, endpoint=endpoint)["header"]
 
     s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"]
     logger.info(f"Header Session OIDs is {s_oid}")
     logger.info(f"OID is {oid}")
 
     assert header["containerID"] == cid, "Tombstone Header CID is wrong"
-
-    with open(wallet_path, "r") as file:
-        wlt_data = json.loads(file.read())
-    wlt = wallet.Wallet.from_json(wlt_data, password="")
-    addr = wlt.accounts[0].address
-
-    assert header["ownerID"] == addr, "Tombstone Owner ID is wrong"
+    assert header["ownerID"] == wallet.get_address_from_json(0), "Tombstone Owner ID is wrong"
     assert header["objectType"] == "TOMBSTONE", "Header Type isn't Tombstone"
-    assert (
-        header["sessionToken"]["body"]["object"]["verb"] == "DELETE"
-    ), "Header Session Type isn't DELETE"
-    assert (
-        header["sessionToken"]["body"]["object"]["target"]["container"] == cid
-    ), "Header Session ID is wrong"
-    assert (
-        oid in header["sessionToken"]["body"]["object"]["target"]["objects"]
-    ), "Header Session OID is wrong"
+    assert header["sessionToken"]["body"]["object"]["verb"] == "DELETE", "Header Session Type isn't DELETE"
+    assert header["sessionToken"]["body"]["object"]["target"]["container"] == cid, "Header Session ID is wrong"
+    assert oid in header["sessionToken"]["body"]["object"]["target"]["objects"], "Header Session OID is wrong"
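The tombstone check no longer re-opens the wallet JSON through neo3 just to learn the owner address; WalletInfo.get_address_from_json(0) provides it directly. Assumed call shape in a delete scenario (values are placeholders):

    tombstone_oid = delete_object(default_wallet, cid, oid, shell=shell, endpoint=endpoint)
    verify_head_tombstone(
        wallet=default_wallet,
        cid=cid,
        oid_ts=tombstone_oid,
        oid=oid,
        shell=shell,
        endpoint=endpoint,
    )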
@@ -1,25 +1,7 @@
-from frostfs_testlib.storage.constants import _FrostfsServicesNames
-from frostfs_testlib.storage.dataclasses.frostfs_services import (
-    HTTPGate,
-    InnerRing,
-    MainChain,
-    MorphChain,
-    S3Gate,
-    StorageNode,
-)
 from frostfs_testlib.storage.service_registry import ServiceRegistry
 
 __class_registry = ServiceRegistry()
 
-# Register default public services
-__class_registry.register_service(_FrostfsServicesNames.STORAGE, StorageNode)
-__class_registry.register_service(_FrostfsServicesNames.INNER_RING, InnerRing)
-__class_registry.register_service(_FrostfsServicesNames.MORPH_CHAIN, MorphChain)
-__class_registry.register_service(_FrostfsServicesNames.S3_GATE, S3Gate)
-__class_registry.register_service(_FrostfsServicesNames.HTTP_GATE, HTTPGate)
-# # TODO: Remove this since we are no longer have main chain
-__class_registry.register_service(_FrostfsServicesNames.MAIN_CHAIN, MainChain)
-
 
 def get_service_registry() -> ServiceRegistry:
     """Returns registry with registered classes related to cluster and cluster nodes.
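With the default registrations removed, get_service_registry() returns an empty registry here and services are expected to be registered by the hosting/plugin layer instead. The registry API itself is unchanged; a hedged sketch (the service-name string is illustrative, the removed code used the _FrostfsServicesNames constants):

    registry = get_service_registry()
    registry.register_service("frostfs-storage", StorageNode)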
Some files were not shown because too many files have changed in this diff.