Compare commits

master..empty

No commits in common. "master" and "empty" have entirely different histories.

198 changed files with 2 additions and 26241 deletions


@@ -1,109 +0,0 @@
hosts:
  - address: localhost
    hostname: localhost
    plugin_name: docker
    healthcheck_plugin_name: basic
    attributes:
      sudo_shell: false
      skip_readiness_check: True
      force_transactions: True
    services:
      - name: frostfs-storage_01
        attributes:
          container_name: s01
          config_path: /etc/frostfs/storage/config.yml
          wallet_path: ../frostfs-dev-env/services/storage/wallet01.json
          local_wallet_config_path: ./TemporaryDir/empty-password.yml
          local_wallet_path: ../frostfs-dev-env/services/storage/wallet01.json
          wallet_password: ""
          volume_name: storage_storage_s01
          endpoint_data0: s01.frostfs.devenv:8080
          control_endpoint: s01.frostfs.devenv:8081
          un_locode: "RU MOW"
      - name: frostfs-storage_02
        attributes:
          container_name: s02
          config_path: /etc/frostfs/storage/config.yml
          wallet_path: ../frostfs-dev-env/services/storage/wallet02.json
          local_wallet_config_path: ./TemporaryDir/empty-password.yml
          local_wallet_path: ../frostfs-dev-env/services/storage/wallet02.json
          wallet_password: ""
          volume_name: storage_storage_s02
          endpoint_data0: s02.frostfs.devenv:8080
          control_endpoint: s02.frostfs.devenv:8081
          un_locode: "RU LED"
      - name: frostfs-storage_03
        attributes:
          container_name: s03
          config_path: /etc/frostfs/storage/config.yml
          wallet_path: ../frostfs-dev-env/services/storage/wallet03.json
          local_wallet_config_path: ./TemporaryDir/empty-password.yml
          local_wallet_path: ../frostfs-dev-env/services/storage/wallet03.json
          wallet_password: ""
          volume_name: storage_storage_s03
          endpoint_data0: s03.frostfs.devenv:8080
          control_endpoint: s03.frostfs.devenv:8081
          un_locode: "SE STO"
      - name: frostfs-storage_04
        attributes:
          container_name: s04
          config_path: /etc/frostfs/storage/config.yml
          wallet_path: ../frostfs-dev-env/services/storage/wallet04.json
          local_wallet_config_path: ./TemporaryDir/empty-password.yml
          local_wallet_path: ../frostfs-dev-env/services/storage/wallet04.json
          wallet_password: ""
          volume_name: storage_storage_s04
          endpoint_data0: s04.frostfs.devenv:8080
          control_endpoint: s04.frostfs.devenv:8081
          un_locode: "FI HEL"
      - name: frostfs-s3_01
        attributes:
          container_name: s3_gate
          config_path: ../frostfs-dev-env/services/s3_gate/.s3.env
          wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json
          local_wallet_config_path: ./TemporaryDir/password-s3.yml
          local_wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json
          wallet_password: "s3"
          endpoint_data0: https://s3.frostfs.devenv:8080
      - name: frostfs-http_01
        attributes:
          container_name: http_gate
          config_path: ../frostfs-dev-env/services/http_gate/.http.env
          wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json
          local_wallet_config_path: ./TemporaryDir/password-other.yml
          local_wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json
          wallet_password: "one"
          endpoint_data0: http://http.frostfs.devenv
      - name: frostfs-ir_01
        attributes:
          container_name: ir01
          config_path: ../frostfs-dev-env/services/ir/.ir.env
          wallet_path: ../frostfs-dev-env/services/ir/az.json
          local_wallet_config_path: ./TemporaryDir/password-other.yml
          local_wallet_path: ../frostfs-dev-env/services/ir/az.json
          wallet_password: "one"
      - name: neo-go_01
        attributes:
          container_name: morph_chain
          config_path: ../frostfs-dev-env/services/morph_chain/protocol.privnet.yml
          wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json
          local_wallet_config_path: ./TemporaryDir/password-other.yml
          local_wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json
          wallet_password: "one"
          endpoint_internal0: http://morph-chain.frostfs.devenv:30333
      - name: main-chain_01
        attributes:
          container_name: main_chain
          config_path: ../frostfs-dev-env/services/chain/protocol.privnet.yml
          wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json
          local_wallet_config_path: ./TemporaryDir/password-other.yml
          local_wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json
          wallet_password: "one"
          endpoint_internal0: http://main-chain.frostfs.devenv:30333
      - name: coredns_01
        attributes:
          container_name: coredns
    clis:
      - name: frostfs-cli
        exec_path: frostfs-cli


@@ -1,21 +0,0 @@
name: DCO action
on: [pull_request]

jobs:
  dco:
    name: DCO
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Setup Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.21'
      - name: Run commit format checker
        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
        with:
          from: 'origin/${{ github.event.pull_request.base.ref }}'

.gitignore

@@ -1,14 +0,0 @@
# ignore IDE files
.vscode
.idea
venv.*
# ignore temp files under any path
.DS_Store
**/__pycache__
# ignore build artifacts
/dist
/build
*.egg-info
wallet_config.yml


@@ -1,11 +0,0 @@
repos:
  - repo: https://github.com/psf/black
    rev: 22.8.0
    hooks:
      - id: black
        language_version: python3.10
  - repo: https://github.com/pycqa/isort
    rev: 5.12.0
    hooks:
      - id: isort
        name: isort (python)


@@ -1,3 +0,0 @@
.* @TrueCloudLab/qa-committers
.forgejo/.* @potyarkin
Makefile @potyarkin


@@ -1,225 +0,0 @@
# Contribution guide
First, thank you for contributing! We love and encourage pull requests from
everyone. Please follow the guidelines:
- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/issues) and
[pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/pulls) for existing
discussions.
- Open an issue first, to discuss a new feature or enhancement.
- Write tests, and make sure the test suite passes locally.
- Open a pull request, and reference the relevant issue(s).
- Make sure your commits are logically separated and have good comments
explaining the details of your change.
- After receiving feedback, amend your commits or add new ones as appropriate.
- **Have fun!**
## Development Workflow
Start by forking the `frostfs-testlib` repository, make changes in a branch and then
send a pull request. We encourage pull requests to discuss code changes. Here
are the steps in details:
### Set up your Git Repository
Fork [FrostFS testlib upstream](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/forks) source
repository to your own personal repository. Copy the URL of your fork and clone it:
```shell
$ git clone <url of your fork>
```
### Set up git remote as ``upstream``
```shell
$ cd frostfs-testlib
$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-testlib
$ git fetch upstream
```
### Set up development environment
To set up a development environment for `frostfs-testlib`, take the following steps:
```shell
$ make venv
$ source venv.frostfs-testlib/bin/activate
```
Optionally, you may want to integrate the code formatters with your code editor so that they are applied to code files as you go:
* isort is supported by [PyCharm](https://plugins.jetbrains.com/plugin/15434-isortconnect) and [VS Code](https://cereblanco.medium.com/setup-black-and-isort-in-vscode-514804590bf9). Plugins exist for other IDEs/editors as well.
* black can be integrated with multiple editors; instructions are available [here](https://black.readthedocs.io/en/stable/integrations/editors.html).
### Create your feature branch
Before making code changes, make sure you create a separate branch for them. You may find it convenient to name the branch in the
`<type>/<issue>-<changes_topic>` format.
```shell
$ git checkout -b feature/123-something_awesome
```
### Test your changes
Before submitting any changes to the library, make sure that the linter and all unit tests pass. To run them, use the following command:
```shell
$ make validation
```
To enable tests that interact with an SSH server, set up an SSH server and set the following environment variables before running the tests:
```
SSH_SHELL_HOST = <address of the server>
SSH_SHELL_LOGIN = <login that has permissions to run python3 on the server>
SSH_SHELL_PRIVATE_KEY_PATH = <path to SSH private key on your machine>
SSH_SHELL_PRIVATE_KEY_PASSPHRASE = <passphrase for the SSH private key>
```
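For example, export the variables and run the suite (the values below are placeholders; substitute your own server, login, and key):
```shell
$ export SSH_SHELL_HOST=10.0.0.1
$ export SSH_SHELL_LOGIN=testuser
$ export SSH_SHELL_PRIVATE_KEY_PATH=~/.ssh/id_rsa
$ export SSH_SHELL_PRIVATE_KEY_PASSPHRASE=secret
$ make validation
```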
### Commit changes
After verification, commit your changes. There is a [great
post](https://chris.beams.io/posts/git-commit/) on how to write useful commit
messages. Try following this template:
```
[#Issue] Summary
Description
<Macros>
<Sign-Off>
```
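A complete message following this template might look like this (the issue number and author are hypothetical):
```
[#123] Add some feature

Add a retry helper to the shell module.

Signed-off-by: Your Name <your@email.com>
```
For a small change, the summary line alone may be enough: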
```shell
$ git commit -am '[#123] Add some feature'
```
### Push to the branch
Push your locally committed changes to the remote origin (your fork):
```shell
$ git push origin feature/123-something_awesome
```
### Create a Pull Request
Pull requests can be created via Git. Refer to [this
document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for
detailed steps on how to create a pull request. After a Pull Request gets peer
reviewed and approved, it will be merged.
## DCO Sign off
All authors to the project retain copyright to their work. However, to ensure
that they are only submitting work that they have rights to, we are requiring
everyone to acknowledge this by signing their work.
Any copyright notices in this repository should specify the authors as "the
contributors".
To sign your work, just add a line like this at the end of your commit message:
```
Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
```
This can easily be done with the `--signoff` option to `git commit`.
By doing this you state that you can certify the following (from [The Developer
Certificate of Origin](https://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
## Code Style
We use `black` and `isort` for code formatting. Please, refer to [Black code style](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html) for details.
Type hints are mandatory for the library's code:
- class attributes;
- function or method's parameters;
- function or method's return type.
The only exception is the return type of test functions or methods, since there's not much use in specifying `None` as the return type for each test function.
Do not use relative imports. Even if the module is in the same package, use the full package name.
To format docstrings, use [Google Style Docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html). Type annotations should be specified in the code and not in docstrings (refer to [this sample](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/index.html#type-annotations)).
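To illustrate these rules together, here is a hypothetical snippet (the class and method are invented for the example; the `Shell.exec(...).stdout` usage follows the library's shell interface, so treat exact signatures as assumptions):
```python
from frostfs_testlib.shell import Shell  # full package path, even for modules in the same package


class RebootHelper:
    """Helper that reboots a host via a shell.

    Attributes:
        attempts: Maximum number of reboot attempts.
    """

    attempts: int = 3

    def reboot(self, shell: Shell, hard: bool = False) -> str:
        """Reboot the host and return the command output.

        Args:
            shell: Shell to execute the command with.
            hard: Whether to force an immediate reboot.

        Returns:
            Standard output of the reboot command.
        """
        command = "reboot -f" if hard else "reboot"
        return shell.exec(command).stdout
```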
## Editable installation
If you would like to modify the library's code while integrating it with your test suite, you can use an editable installation. To do so, in the virtual environment of your test suite (not in the virtual environment of the testlib itself!) run the following command (the path to the `frostfs-testlib` directory might be different on your machine):
```shell
$ pip install -e ../frostfs-testlib
```
# Maintaining guide
## Versioning
We follow [Semantic Versioning Specification](https://semver.org/) to version this library. To manage version number in the source code, we use [bumpver](https://pypi.org/project/bumpver/) package.
To update a version of the library, please, take the following steps:
1. Make sure that you have no pending changes in git.
2. Run the following command to update version and commit it to git:
```shell
$ bumpver update --major # to update major version
$ bumpver update --minor # to update minor version
$ bumpver update --patch # to update the patch component of the version
```
3. Sign-off the created commit:
```shell
$ git commit --amend --signoff
```
4. Push the changes to remote.
5. After this commit is merged to upstream, create a tag on the master branch of upstream. Tag name should be formatted as "v{new_version}":
```shell
$ git tag v<new_version>
$ git push upstream v<new_version>
```
## Building and publishing package
To build the Python package of the library, run the following command in the library root directory:
```shell
$ python -m build
```
This command will put wheel file and source archive under `dist` directory.
To check that the package description will be rendered correctly on PyPI, use the command:
```shell
$ twine check dist/*
```
To upload the package to [test PyPI](https://test.pypi.org/project/frostfs-testlib/), use the command:
```shell
$ twine upload -r testpypi dist/*
```
It will prompt for your username and password. You will need to [create a test PyPI account](https://test.pypi.org/account/register/) to execute it.
To upload the package to the actual PyPI, use the command:
```shell
$ twine upload dist/*
```
It will prompt for your username and password. You will need to [create a PyPI account](https://pypi.org/account/register/) to execute it.

LICENSE

@@ -1,674 +0,0 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.


@@ -1,50 +0,0 @@
SHELL := /bin/bash
PYTHON_VERSION := 3.10
VENV_NAME := frostfs-testlib
VENV_DIR := venv.${VENV_NAME}

current_dir := $(shell pwd)
DIRECTORIES := $(sort $(dir $(wildcard ../frostfs-testlib-plugin-*/ ../*-testcases/)))
FROM_VENV := . ${VENV_DIR}/bin/activate &&

venv: create requirements paths precommit
	@echo Ready

precommit:
	@echo Installing pre-commit hooks
	. ${VENV_DIR}/bin/activate && pre-commit install

paths:
	@echo Append paths for project
	@echo Virtual environment: ${current_dir}/${VENV_DIR}
	@rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
	@touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
	@echo ${current_dir}/src | tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth

create: ${VENV_DIR}

${VENV_DIR}:
	@echo Create virtual environment ${current_dir}/${VENV_DIR}
	virtualenv --python=python${PYTHON_VERSION} --prompt=${VENV_NAME} ${VENV_DIR}

requirements:
	@echo Installing pip requirements
	. ${VENV_DIR}/bin/activate && pip install -Ur requirements.txt

#### VALIDATION SECTION ####
lint: create requirements
	${FROM_VENV} pylint --disable R,C,W ./src

unit_test:
	@echo Starting unit tests
	${FROM_VENV} python -m pytest tests

.PHONY: lint_dependent $(DIRECTORIES)
lint_dependent: $(DIRECTORIES)

$(DIRECTORIES):
	@echo checking dependent repo $@
	$(MAKE) validation -C $@

validation: lint unit_test lint_dependent


@@ -1,95 +1,3 @@
# frostfs-testlib
This library provides building blocks and utilities to facilitate the development of automated tests for the FrostFS system.
# WIP area: this repo is just a fork!
## Installation
Library can be installed via pip:
```shell
$ pip install frostfs-testlib
```
## Configuration
Some library components support configuration that allows dynamic loading of extensions via plugins. Configuration of such components is described in this section.
### Reporter Configuration
Reporter is a singleton component that is used by the library to store test artifacts.
The reporter sends artifacts to handlers that are responsible for actually storing them in a particular system. By default the reporter is initialized without any handlers and won't take any action to store the artifacts. To add handlers directly via code, use the method `register_handler`:
```python
from frostfs_testlib.reporter import AllureHandler, get_reporter
get_reporter().register_handler(AllureHandler())
```
This registration should happen early in the test session, because any artifacts produced before a handler is registered won't be stored anywhere.
An alternative approach to registering handlers is the method `configure`. It is similar to the method [dictConfig](https://docs.python.org/3/library/logging.config.html#logging.config.dictConfig) in the sense that it receives a config structure describing the handlers that should be registered in the reporter. Each handler is defined by its plugin name; for example, to register the built-in Allure handler, we can use the following config:
```python
get_reporter().configure({ "handlers": [{"plugin_name": "allure"}] })
```
### Hosting Configuration
The hosting component is a class that represents the infrastructure (machines/containers/services) where FrostFS is hosted. Interaction with a specific infrastructure instance (host) is encapsulated in classes that implement the interface `frostfs_testlib.hosting.Host`. To pass information about hosts to the `Hosting` class at runtime, we use the method `configure`:
```python
from frostfs_testlib.hosting import Hosting
hosting = Hosting()
hosting.configure({ "hosts": [{ "address": "localhost", "plugin_name": "docker" ... }]})
```
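Once hosting is configured, an individual host can be looked up and managed through the `Host` interface. A minimal sketch continuing the snippet above (the lookup and service-control method names are taken from the library's `Hosting`/`Host` classes, so verify them against your version):
```python
host = hosting.get_host_by_address("localhost")
host.start_service("frostfs-storage_01")  # delegated to the configured plugin (docker here)
```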
## Plugins
Testlib uses [entrypoint specification](https://docs.python.org/3/library/importlib.metadata.html) for plugins. Testlib supports the following entrypoint groups for plugins:
- `frostfs.testlib.reporter` - group for reporter handler plugins. Plugin should be a class that implements interface `frostfs_testlib.reporter.interfaces.ReporterHandler`.
### Example reporter plugin
In this example we will consider two Python projects:
- Project "my_frostfs_plugins" where we will build a plugin that extends testlib functionality.
- Project "my_frostfs_tests" that uses "frostfs_testlib" and "my_frostfs_plugins" to build some tests.
Let's say we want to implement a custom reporter handler that can be used as a plugin for testlib. Pseudo-code of the implementation can look like this:
```python
# File my_frostfs_plugins/src/foo/bar/custom_handler.py
from contextlib import AbstractContextManager
from typing import Any

from frostfs_testlib.reporter import ReporterHandler


class CustomHandler(ReporterHandler):
    def step(self, name: str) -> AbstractContextManager:
        ...  # some implementation

    def attach(self, content: Any, file_name: str) -> None:
        ...  # some implementation
```
Then, in the `pyproject.toml` file of "my_frostfs_plugins", we should register an entrypoint for this plugin. The entrypoint must belong to the group `frostfs.testlib.reporter`:
```toml
# File my_frostfs_plugins/pyproject.toml
[project.entry-points."frostfs.testlib.reporter"]
my_custom_handler = "foo.bar.custom_handler:CustomHandler"
```
Finally, to use this handler in our test project "my_frostfs_tests", we should configure the reporter with the name of the handler plugin:
```python
# File my_frostfs_tests/src/conftest.py
from frostfs_testlib.reporter import get_reporter
get_reporter().configure({ "handlers": [{"plugin_name": "my_custom_handler"}] })
```
Detailed information about registering entrypoints can be found at [setuptools docs](https://setuptools.pypa.io/en/latest/userguide/entry_point.html).
## Library structure
The library provides the following primary components:
* `blockchain` - helpers for interacting with the Neo blockchain, smart contracts, gas transfers, etc.
* `cli` - wrappers on top of FrostFS command-line tools. These wrappers execute in a shell and provide a type-safe interface for interacting with the tools.
* `hosting` - management of infrastructure (Docker, virtual machines, services where FrostFS is hosted). The library provides a host implementation for the Docker environment (when FrostFS services run as Docker containers). Support for other hosts is provided via plugins.
* `reporter` - abstraction on top of test reporting tools like Allure. Components of the library report their steps and attach artifacts to the configured reporter instance.
* `shell` - shells that can be used to execute commands. Currently the library provides a local shell (on the machine that runs the code) and an SSH shell that connects to a remote machine via SSH; a minimal usage sketch follows this list.
* `utils` - support functions.
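As a quick illustration of the `shell` component, here is a minimal sketch (assuming the library is installed; `LocalShell` and the result fields follow the library's shell interface):
```python
from frostfs_testlib.shell import LocalShell

shell = LocalShell()
result = shell.exec("echo hello")  # executed on the machine running the tests
print(result.return_code, result.stdout)  # 0 and "hello"
```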
## Contributing
Any contributions to the library should conform to the [contribution guideline](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/src/branch/master/CONTRIBUTING.md).
Useful things may be published only in [other branches](../../../branches)


@@ -1,95 +0,0 @@
[build-system]
requires = ["setuptools>=65.0.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "frostfs-testlib"
version = "2.0.1"
description = "Building blocks and utilities to facilitate development of automated tests for FrostFS system"
readme = "README.md"
authors = [{ name = "Yadro", email = "info@yadro.com" }]
license = { text = "GNU General Public License v3 (GPLv3)" }
classifiers = [
    "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
]
keywords = ["frostfs", "test"]
dependencies = [
    "allure-python-commons>=2.13.2",
    "docker>=4.4.0",
    "pyyaml==6.0.1",
    "neo-mamba==1.0.0",
    "paramiko>=2.10.3",
    "pexpect>=4.8.0",
    "requests==2.28.1",
    "docstring_parser>=0.15",
    "testrail-api>=1.12.0",
    "pytest==7.1.2",
    "tenacity==8.0.1",
    "boto3==1.35.30",
    "boto3-stubs[s3,iam,sts]==1.35.30",
]
requires-python = ">=3.10"

[project.optional-dependencies]
dev = ["black", "bumpver", "isort", "pre-commit"]

[project.urls]
Homepage = "https://git.frostfs.info/TrueCloudLab/frostfs-testlib"

[project.entry-points."frostfs.testlib.reporter"]
allure = "frostfs_testlib.reporter.allure_handler:AllureHandler"

[project.entry-points."frostfs.testlib.hosting"]
docker = "frostfs_testlib.hosting.docker_host:DockerHost"

[project.entry-points."frostfs.testlib.healthcheck"]
basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck"

[project.entry-points."frostfs.testlib.csc_managers"]
config = "frostfs_testlib.storage.controllers.state_managers.config_state_manager:ConfigStateManager"

[project.entry-points."frostfs.testlib.services"]
frostfs-storage = "frostfs_testlib.storage.dataclasses.frostfs_services:StorageNode"
frostfs-s3 = "frostfs_testlib.storage.dataclasses.frostfs_services:S3Gate"
frostfs-http = "frostfs_testlib.storage.dataclasses.frostfs_services:HTTPGate"
neo-go = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain"
frostfs-ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing"

[project.entry-points."frostfs.testlib.credentials_providers"]
authmate = "frostfs_testlib.credentials.authmate_s3_provider:AuthmateS3CredentialsProvider"
wallet_factory = "frostfs_testlib.credentials.wallet_factory_provider:WalletFactoryProvider"

[project.entry-points."frostfs.testlib.bucket_cid_resolver"]
frostfs = "frostfs_testlib.clients.s3.curl_bucket_resolver:CurlBucketContainerResolver"

[tool.isort]
profile = "black"
src_paths = ["src", "tests"]
line_length = 140

[tool.black]
line-length = 140
target-version = ["py310"]

[tool.bumpver]
current_version = "2.0.1"
version_pattern = "MAJOR.MINOR.PATCH"
commit_message = "Bump version {old_version} -> {new_version}"
commit = true
tag = false
push = false

[tool.bumpver.file_patterns]
"pyproject.toml" = ['current_version = "{version}"', 'version = "{version}"']
"src/frostfs_testlib/__init__.py" = ["{version}"]

[tool.pytest.ini_options]
filterwarnings = [
    "ignore:Blowfish has been deprecated:cryptography.utils.CryptographyDeprecationWarning",
]
testpaths = ["tests"]

[project.entry-points.pytest11]
testlib = "frostfs_testlib"

View file

@ -1,25 +0,0 @@
allure-python-commons==2.13.2
docker==4.4.0
neo-mamba==1.0.0
paramiko==2.10.3
pexpect==4.8.0
requests==2.28.1
docstring_parser==0.15
testrail-api==1.12.0
tenacity==8.0.1
pytest==7.1.2
boto3==1.35.30
boto3-stubs[s3,iam,sts]==1.35.30
pydantic==2.10.6
# Dev dependencies
black==22.8.0
bumpver==2022.1118
isort==5.12.0
pre-commit==2.20.0
pylint==2.17.4
# Packaging dependencies
build==0.8.0
setuptools==65.3.0
twine==4.0.1

View file

@ -1,4 +0,0 @@
__version__ = "2.0.1"
from .fixtures import configure_testlib, hosting, session_start_time, temp_directory
from .hooks import pytest_add_frostfs_marker, pytest_collection_modifyitems

View file

@ -1,5 +0,0 @@
from frostfs_testlib.analytics import test_case
from frostfs_testlib.analytics.test_case import TestCasePriority
from frostfs_testlib.analytics.test_collector import TestCase, TestCaseCollector
from frostfs_testlib.analytics.test_exporter import TCExporter
from frostfs_testlib.analytics.testrail_exporter import TestrailExporter

View file

@ -1,82 +0,0 @@
import allure
from enum import Enum
from types import FunctionType
class TestCasePriority(Enum):
HIGHEST = 0
HIGH = 1
MEDIUM = 2
LOW = 3
def __set_label__(name: str, value: str, allure_decorator: FunctionType = None):
"""
    Generic helper that avoids duplicating the label-setting code in each decorator.
    It receives the decorated function as an object and sets the required attribute on it.
    Args:
        name: attribute name to set on the function object
        value: attribute value to set on the function object
        allure_decorator: allure decorator to apply as well, so that decorators with the same value are not duplicated
"""
def wrapper(decorated_func):
if allure_decorator:
decorated_func = allure_decorator(value)(decorated_func)
setattr(decorated_func, name, value)
return decorated_func
return wrapper
def id(uuid: str):
"""
    Decorator that sets the test case ID, used as a unique value when exporting into TMS.
    We prefer a UUID4-format string for the ID.
    The ID has to be generated manually for each new test.
Args:
uuid: id to set as test_case_id into test function
"""
return __set_label__("__test_case_id__", uuid)
def title(title: str):
"""
    Decorator that sets the test case title (name / summary / short description of what we do).
Args:
title: string with title to set into test function
"""
return __set_label__("__test_case_title__", title, allure.title)
def priority(priority: str):
"""
    Decorator that sets the test case priority.
    Args:
        priority: priority value to set on the test function
"""
return __set_label__("__test_case_priority__", priority)
def suite_name(name: str):
"""
    Decorator that sets the test case suite name.
    The suite name is usually used in TMS to build the structure of test cases.
    Args:
        name: test suite name to set on the test function
"""
return __set_label__("__test_case_suite_name__", name, allure.story)
def suite_section(name: str):
"""
    Decorator that sets the test case suite section.
    The suite section is usually used in TMS to build a deeper test case structure.
"""
return __set_label__("__test_case_suite_section__", name)

View file

@ -1,199 +0,0 @@
import re
from docstring_parser import parse
from docstring_parser.common import DocstringStyle
from docstring_parser.google import DEFAULT_SECTIONS, Section, SectionType
DEFAULT_SECTIONS.append(Section("Steps", "steps", SectionType.MULTIPLE))
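# With the custom "Steps" section registered above, the collector can read numbered
# steps from Google-style docstrings, e.g. (illustrative only):
#
#   """
#   Put object into container
#
#   Steps:
#       1: Create container
#       2: Put object and verify it is stored
#   """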
class TestCase:
"""
Test case object implementation for use in collector and exporters
"""
def __init__(
self,
uuid_id: str,
title: str,
description: str,
priority: int,
steps: dict,
params: str,
suite_name: str,
suite_section_name: str,
):
"""
Base constructor for TestCase object
Args:
uuid_id: uuid from id decorator
title: test case title from title decorator
priority: test case priority value (0-3)
steps: list of test case steps read from function __doc__
params: string with test case param read from pytest Function(test) object
suite_name: test case suite name from test_suite decorator
suite_section_name: test case suite section from test_suite_section decorator
"""
        # It may be confusing, but we rewrite the id to an "id [params]" string,
        # because one function can produce many tests when test params are used
if params:
self.id = f"{uuid_id} [{params}]"
else:
self.id: str = uuid_id
self.title: str = title
self.description: str = description
self.priority: int = priority
self.steps: dict = steps
self.params: str = params
self.suite_name: str = suite_name
self.suite_section_name: str = suite_section_name
class TestCaseCollector:
"""
    Collector that works as a pytest plugin; it can be used in a collect-only call to get the list of tests from pytest.
    Additionally, it provides several functions to filter the tests that can be exported.
"""
pytest_tests = []
def __format_string_with_params__(self, source_string: str, test_params: dict) -> str:
"""
        Helper function that formats test case string arguments using test params.
        A param name can be nested, like a.b.c; the value is then resolved from the test params step by step.
        At each step we check whether the next object is a dict or a regular object, to use the right way to get the next attribute.
        Args:
            source_string: string to format using test params (if needed)
            test_params: dictionary with test params taken from the pytest test object
        Returns:
            (str): formatted string with param names replaced by param values
"""
target_string: str = source_string
for match in re.findall(r"\{(.*?)}", source_string):
nestings_attrs = match.split(".")
param = None
for nesting_attr in nestings_attrs:
if not param:
param = test_params.get(nesting_attr)
else:
if isinstance(param, dict):
param = param.get(nesting_attr)
else:
param = getattr(param, nesting_attr)
target_string = target_string.replace(f"{{{match}}}", str(param))
return target_string
def __get_test_case_from_pytest_test__(self, test) -> TestCase:
"""
        Parse test meta and return a test case if there is enough information for that.
        Args:
            test: pytest Function object
        Returns:
            (TestCase): the test case if there is enough information to build it, None otherwise
"""
        # Default values used below
suite_name: str = None
suite_section_name: str = None
test_case_steps = dict()
test_case_params: str = ""
test_case_description: str = ""
# Read test_case suite and section name from test class if possible and get test function from class
if test.cls:
suite_name = test.cls.__dict__.get("__test_case_suite_name__", suite_name)
suite_section_name = test.cls.__dict__.get(
"__test_case_suite_section__", suite_section_name
)
test_function = test.cls.__dict__[test.originalname]
else:
# If no test class, read test function from module
test_function = test.module.__dict__[test.originalname]
# Read base values from test function arguments
test_case_id = test_function.__dict__.get("__test_case_id__", None)
test_case_title = test_function.__dict__.get("__test_case_title__", None)
test_case_priority = test_function.__dict__.get("__test_case_priority__", None)
suite_name = test_function.__dict__.get("__test_case_suite_name__", suite_name)
suite_section_name = test_function.__dict__.get(
"__test_case_suite_section__", suite_section_name
)
        # Parse test steps if they are defined in __doc__
doc_string = parse(test_function.__doc__, style=DocstringStyle.GOOGLE)
if doc_string.short_description:
test_case_description = doc_string.short_description
if doc_string.long_description:
test_case_description = (
f"{doc_string.short_description}\r\n{doc_string.long_description}"
)
if doc_string.meta:
for meta in doc_string.meta:
if meta.args[0] == "steps":
test_case_steps[meta.args[1]] = meta.description
        # Read params from the test function if they exist
test_case_call_spec = getattr(test, "callspec", "")
if test_case_call_spec:
# Set test cases params string value
test_case_params = test_case_call_spec.id
# Format title with params
if test_case_title:
test_case_title = self.__format_string_with_params__(
test_case_title, test_case_call_spec.params
)
# Format steps with params
if test_case_steps:
for key, value in test_case_steps.items():
value = self.__format_string_with_params__(value, test_case_call_spec.params)
test_case_steps[key] = value
        # If the basic test case attributes are set, create a TestCase and return it
        if test_case_id and test_case_title and suite_name:
test_case = TestCase(
uuid_id=test_case_id,
title=test_case_title,
description=test_case_description,
priority=test_case_priority,
steps=test_case_steps,
params=test_case_params,
suite_name=suite_name,
suite_section_name=suite_section_name,
)
return test_case
        # Return None if there is not enough information to build a test case
return None
def pytest_report_collectionfinish(self, pytest_tests: list) -> None:
"""
        !!! DO NOT CHANGE THE NAME, IT IS NOT A MISTAKE
        This hook must have exactly this name.
        Pytest will call it when the plugin is used in a collect-only call.
Args:
pytest_tests: list of pytest tests
"""
self.pytest_tests.extend(pytest_tests)
def collect_test_cases(self) -> list[TestCase]:
"""
        Collect test cases from the pytest tests list and return them in the test case representation.
Returns:
(list[TestCase]): list of test cases that we found in the pytest tests code
"""
test_cases = []
for test in self.pytest_tests:
test_case = self.__get_test_case_from_pytest_test__(test)
if test_case:
test_cases.append(test_case)
return test_cases

View file

@ -1,69 +0,0 @@
from abc import ABC, abstractmethod
from frostfs_testlib.analytics.test_collector import TestCase
# TODO: REMOVE ME
class TCExporter(ABC):
test_cases_cache = []
test_suites_cache = []
@abstractmethod
def fill_suite_cache(self) -> None:
"""
        Fill test_suites_cache with all test suites from TMS.
        This helps avoid calling TMS every time we search for a test suite.
"""
@abstractmethod
def fill_cases_cache(self) -> None:
"""
        Fill test_cases_cache with all test cases from TMS.
        This helps avoid calling TMS every time we search for a test case.
"""
@abstractmethod
def search_test_case_id(self, test_case_id: str) -> object:
"""
        Find a test case in TMS by ID
"""
@abstractmethod
def get_or_create_test_suite(self, test_suite_name: str) -> object:
"""
        Get the test suite with the exact name, or create it if it does not exist
"""
@abstractmethod
def get_or_create_suite_section(self, test_rail_suite, section_name: str) -> object:
"""
        Get the suite section with the exact name, or create a new one if it does not exist
"""
@abstractmethod
def create_test_case(self, test_case: TestCase, test_suite, test_suite_section) -> None:
"""
Create test case in TMS
"""
@abstractmethod
def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None:
"""
Update test case in TMS
"""
def export_test_cases(self, test_cases: list[TestCase]):
# Fill caches before starting imports
self.fill_suite_cache()
self.fill_cases_cache()
for test_case in test_cases:
test_suite = self.get_or_create_test_suite(test_case.suite_name)
test_section = self.get_or_create_suite_section(test_suite, test_case.suite_section_name)
test_case_in_tms = self.search_test_case_id(test_case.id)
if test_case_in_tms:
self.update_test_case(test_case, test_case_in_tms, test_suite, test_section)
else:
self.create_test_case(test_case, test_suite, test_section)

View file

@ -1,159 +0,0 @@
from testrail_api import TestRailAPI
from frostfs_testlib.analytics.test_collector import TestCase
from frostfs_testlib.analytics.test_exporter import TCExporter
class TestrailExporter(TCExporter):
def __init__(
self,
tr_url: str,
tr_username: str,
tr_password: str,
tr_project_id: int,
tr_template_id_without_steps: int,
tr_template_id_with_steps: int,
tr_priority_map: dict,
tr_id_field: str,
tr_description_fields: str,
tr_steps_field: str,
):
"""
        Redefines init of the base exporter to receive TestRail credentials and the project when the exporter is created
Args:
tr_url: api url for create TestRailAPI object. See lib docs for details
tr_username: Testrail user login for api authentication
tr_password: Testrail user password for api authentication
tr_template_id_with_steps: id of test case template with steps
tr_template_id_without_steps: id of test case template without steps
tr_priority_map: mapping of TestCasePriority to priority ids in Testrail
"""
self.api: TestRailAPI = TestRailAPI(tr_url, tr_username, tr_password)
self.tr_project_id: int = tr_project_id
self.tr_template_id_without_steps = tr_template_id_without_steps
self.tr_template_id_with_steps = tr_template_id_with_steps
self.tr_priority_map = tr_priority_map
self.tr_id_field = tr_id_field
self.tr_description_fields = tr_description_fields
self.tr_steps_field = tr_steps_field
self.test_case_id_field_name = "" # TODO: Add me
def fill_suite_cache(self) -> None:
"""
        Fill test_suites_cache with all test suites from TestRail.
        This helps avoid calling TMS every time we search for a test suite.
"""
project_suites = self.api.suites.get_suites(project_id=self.tr_project_id)
for test_suite in project_suites:
test_suite_sections = self.api.sections.get_sections(
project_id=self.tr_project_id,
suite_id=test_suite["id"],
)
test_suite["sections"] = test_suite_sections
self.test_suites_cache.append(test_suite)
def fill_cases_cache(self) -> None:
"""
        Fill test_cases_cache with all test cases from TestRail.
        This helps avoid calling TMS every time we search for a test case.
"""
for test_suite in self.test_suites_cache:
self.test_cases_cache.extend(self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"]))
def search_test_case_id(self, test_case_id: str) -> object:
"""
        Find a test case in the TestRail cache by ID
"""
test_cases = [test_case for test_case in self.test_cases_cache if test_case["custom_autotest_name"] == test_case_id]
if len(test_cases) > 1:
raise RuntimeError(f"Too many results found in test rail for id {test_case_id}")
elif len(test_cases) == 1:
return test_cases.pop()
else:
return None
def get_or_create_test_suite(self, test_suite_name) -> object:
"""
        Get the test suite with the exact name from TestRail, or create it if it does not exist
"""
test_rail_suites = [suite for suite in self.test_suites_cache if suite["name"] == test_suite_name]
if not test_rail_suites:
test_rail_suite = self.api.suites.add_suite(
project_id=self.tr_project_id,
name=test_suite_name,
)
test_rail_suite["sections"] = list()
self.test_suites_cache.append(test_rail_suite)
return test_rail_suite
elif len(test_rail_suites) == 1:
return test_rail_suites.pop()
else:
raise RuntimeError(f"Too many results found in test rail for suite name {test_suite_name}")
def get_or_create_suite_section(self, test_rail_suite, section_name) -> object:
"""
        Get the suite section with the exact name from TestRail, or create a new one if it does not exist
"""
test_rail_sections = [section for section in test_rail_suite["sections"] if section["name"] == section_name]
if not test_rail_sections:
test_rail_section = self.api.sections.add_section(
project_id=self.tr_project_id,
suite_id=test_rail_suite["id"],
name=section_name,
)
            # NOTE: it is unclear whether we mutate the cached suite object here or only a copy of it.
            # We have to update the object in the cache; otherwise we will try to create
            # the section twice and get an error from the API.
test_rail_suite["sections"].append(test_rail_section)
return test_rail_section
elif len(test_rail_sections) == 1:
return test_rail_sections.pop()
else:
raise RuntimeError(f"Too many results found in test rail for section name {section_name}")
def prepare_request_body(self, test_case: TestCase, test_suite, test_suite_section) -> dict:
"""
        Helper that prepares the request body for adding or updating a test case from a TestCase object
"""
request_body = {
"title": test_case.title,
"section_id": test_suite_section["id"],
self.test_case_id_field_name: test_case.id,
}
if test_case.priority:
request_body["priority_id"] = self.tr_priority_map.get(test_case.priority)
if test_case.steps:
steps = [{"content": value, "expected": " "} for key, value in test_case.steps.items()]
request_body[self.tr_steps_field] = steps
request_body["template_id"] = self.tr_template_id_with_steps
else:
request_body["template_id"] = self.tr_template_id_without_steps
if test_case.description:
            request_body[self.tr_description_fields] = test_case.description
return request_body
def create_test_case(self, test_case: TestCase, test_suite, test_suite_section) -> None:
"""
Create test case in Testrail
"""
request_body = self.prepare_request_body(test_case, test_suite, test_suite_section)
self.api.cases.add_case(**request_body)
def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None:
"""
Update test case in Testrail
"""
request_body = self.prepare_request_body(test_case, test_suite, test_suite_section)
self.api.cases.update_case(case_id=test_case_in_tms["id"], **request_body)

View file

@ -1,2 +0,0 @@
from frostfs_testlib.blockchain.multisig import Multisig
from frostfs_testlib.blockchain.rpc_client import RPCClient

View file

@ -1,51 +0,0 @@
from frostfs_testlib.cli import NeoGo
class Multisig:
def __init__(self, neogo: NeoGo, invoke_tx_file: str, block_period: int):
self.neogo = neogo
self.invoke_tx_file = invoke_tx_file
self.block_period = block_period
def create_and_send(
self,
contract_hash: str,
contract_args: str,
multisig_hash: str,
wallets: list[str],
passwords: list[str],
address: str,
endpoint: str,
) -> None:
if not len(wallets):
raise AttributeError("Got empty wallets list")
self.neogo.contract.invokefunction(
address=address,
rpc_endpoint=endpoint,
wallet=wallets[0],
wallet_password=passwords[0],
out=None if len(wallets) == 1 else self.invoke_tx_file,
scripthash=contract_hash,
arguments=contract_args,
multisig_hash=multisig_hash,
)
if len(wallets) > 1:
# sign with rest of wallets except the last one
for wallet in wallets[1:-1]:
self.neogo.wallet.sign(
wallet=wallet,
input_file=self.invoke_tx_file,
out=self.invoke_tx_file,
address=address,
)
# sign tx with last wallet and push it to blockchain
self.neogo.wallet.sign(
wallet=wallets[-1],
input_file=self.invoke_tx_file,
out=self.invoke_tx_file,
address=address,
rpc_endpoint=endpoint,
)

View file

@ -1,155 +0,0 @@
import json
from time import sleep
from typing import Optional
from frostfs_testlib.blockchain import Multisig
from frostfs_testlib.cli import NeoGo
from frostfs_testlib.shell import Shell
from frostfs_testlib.utils.converting_utils import process_b64_bytearray
class RoleDesignation:
def __init__(
self,
shell: Shell,
neo_go_exec_path: str,
block_period: int,
designate_contract: str,
):
self.neogo = NeoGo(shell, neo_go_exec_path)
self.block_period = block_period
self.designate_contract = designate_contract
def set_notary_nodes(
self,
addr: str,
pubkeys: list[str],
script_hash: str,
wallet: str,
passwd: str,
endpoint: str,
) -> str:
keys = [f"bytes:{k}" for k in pubkeys]
keys_str = " ".join(keys)
out = self.neogo.contract.invokefunction(
address=addr,
scripthash=self.designate_contract,
wallet=wallet,
wallet_password=passwd,
rpc_endpoint=endpoint,
arguments=f"designateAsRole int:32 [ {keys_str} ] -- {script_hash}",
force=True,
)
sleep(self.block_period)
return out.stdout.split(" ")[-1]
def set_inner_ring(
self,
addr: str,
pubkeys: list[str],
script_hash: str,
wallet: str,
passwd: str,
endpoint: str,
) -> str:
keys = [f"bytes:{k}" for k in pubkeys]
keys_str = " ".join(keys)
out = self.neogo.contract.invokefunction(
address=addr,
scripthash=self.designate_contract,
wallet=wallet,
wallet_password=passwd,
rpc_endpoint=endpoint,
arguments=f"designateAsRole int:16 [ {keys_str} ] -- {script_hash}",
force=True,
)
sleep(self.block_period)
return out.stdout.split(" ")[-1]
def set_oracles(
self,
addr: str,
pubkeys: list[str],
script_hash: str,
wallet: str,
passwd: str,
endpoint: str,
) -> str:
keys = [f"bytes:{k}" for k in pubkeys]
keys_str = " ".join(keys)
out = self.neogo.contract.invokefunction(
address=addr,
scripthash=self.designate_contract,
wallet=wallet,
wallet_password=passwd,
rpc_endpoint=endpoint,
arguments=f"designateAsRole int:8 [ {keys_str} ] -- {script_hash}",
force=True,
)
sleep(self.block_period)
return out.stdout.split(" ")[-1]
def set_notary_nodes_multisig_tx(
self,
pubkeys: list[str],
script_hash: str,
wallets: list[str],
passwords: list[str],
address: str,
endpoint: str,
invoke_tx_file: str,
) -> None:
keys = [f"bytes:{k}" for k in pubkeys]
keys_str = " ".join(keys)
multisig = Multisig(
self.neogo, invoke_tx_file=invoke_tx_file, block_period=self.block_period
)
multisig.create_and_send(
self.designate_contract,
f"designateAsRole int:32 [ {keys_str} ]",
script_hash,
wallets,
passwords,
address,
endpoint,
)
sleep(self.block_period)
def set_inner_ring_multisig_tx(
self,
pubkeys: list[str],
script_hash: str,
wallets: list[str],
passwords: list[str],
address: str,
endpoint: str,
invoke_tx_file: str,
) -> None:
keys = [f"bytes:{k}" for k in pubkeys]
keys_str = " ".join(keys)
multisig = Multisig(
self.neogo, invoke_tx_file=invoke_tx_file, block_period=self.block_period
)
multisig.create_and_send(
self.designate_contract,
f"designateAsRole int:16 [ {keys_str} ]",
script_hash,
wallets,
passwords,
address,
endpoint,
)
sleep(self.block_period)
def check_candidates(self, contract_hash: str, endpoint: str) -> Optional[list[str]]:
out = self.neogo.contract.testinvokefunction(
scripthash=contract_hash,
method="innerRingCandidates",
rpc_endpoint=endpoint,
)
output_dict = json.loads(out.stdout.replace("\n", ""))
candidates = output_dict["stack"][0]["value"]
if len(candidates) == 0:
return None
# TODO: return a list of keys
return [process_b64_bytearray(candidate["value"][0]["value"]) for candidate in candidates]

View file

@ -1,80 +0,0 @@
import json
import logging
from typing import Any, Dict, Optional
import requests
logger = logging.getLogger("frostfs.testlib.blockchain")
class NeoRPCException(Exception):
pass
class RPCClient:
def __init__(self, endpoint, timeout: int = 10):
self.endpoint = endpoint
self.timeout = timeout
def get_raw_transaction(self, tx_hash):
return self._call_endpoint("getrawtransaction", params=[tx_hash])
def send_raw_transaction(self, raw_tx: str):
return self._call_endpoint("sendrawtransaction", params=[raw_tx])
def get_storage(self, sc_hash: str, storage_key: str):
return self._call_endpoint("getstorage", params=[sc_hash, storage_key])
def invoke_function(
self,
sc_hash: str,
function: str,
params: Optional[list] = None,
signers: Optional[list] = None,
) -> Dict[str, Any]:
return self._call_endpoint(
"invokefunction", params=[sc_hash, function, params or [], signers or []]
)
def get_transaction_height(self, txid: str):
return self._call_endpoint("gettransactionheight", params=[txid])
def get_nep17_transfers(self, address, timestamps=None):
params = [address]
if timestamps:
params.append(timestamps)
return self._call_endpoint("getnep17transfers", params)
def get_nep17_balances(self, address):
return self._call_endpoint("getnep17balances", [address, 0])
def get_application_log(self, tx_hash):
return self._call_endpoint("getapplicationlog", params=[tx_hash])
def get_contract_state(self, contract_id):
"""
`contract_id` might be contract name, script hash or number
"""
return self._call_endpoint("getcontractstate", params=[contract_id])
def _call_endpoint(self, method, params=None) -> Dict[str, Any]:
payload = _build_payload(method, params)
logger.info(payload)
try:
response = requests.post(self.endpoint, data=payload, timeout=self.timeout)
response.raise_for_status()
if response.status_code == 200:
if "result" in response.json():
return response.json()["result"]
return response.json()
except Exception as exc:
raise NeoRPCException(
f"Could not call method {method} "
f"with endpoint: {self.endpoint}: {exc}"
f"\nRequest sent: {payload}"
) from exc
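# Illustrative payload (hypothetical address) produced for get_nep17_balances("NhVtreZLcyqGhuKVfEQHbiGbvJMpET1i2r"):
#   {"jsonrpc": "2.0", "method": "getnep17balances", "params": ["NhVtreZLcyqGhuKVfEQHbiGbvJMpET1i2r", 0], "id": 1}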
def _build_payload(method, params: Optional[list] = None):
payload = json.dumps({"jsonrpc": "2.0", "method": method, "params": params or [], "id": 1})
return payload.replace("'", '"')

View file

@ -1,5 +0,0 @@
from frostfs_testlib.cli.frostfs_adm import FrostfsAdm
from frostfs_testlib.cli.frostfs_authmate import FrostfsAuthmate
from frostfs_testlib.cli.frostfs_cli import FrostfsCli
from frostfs_testlib.cli.generic_cli import GenericCli
from frostfs_testlib.cli.neogo import NeoGo, NetworkType

View file

@ -1,81 +0,0 @@
from typing import Optional
from frostfs_testlib.shell import CommandOptions, CommandResult, InteractiveInput, Shell
from frostfs_testlib.utils.datetime_utils import parse_time
class CliCommand:
TIMEOUT_INACCURACY = 10
WALLET_SOURCE_ERROR_MSG = "Provide either wallet or wallet_config to specify wallet location"
WALLET_PASSWD_ERROR_MSG = "Provide either wallet_password or wallet_config to specify password"
cli_exec_path: Optional[str] = None
__base_params: Optional[str] = None
map_params = {
"json_mode": "json",
"await_mode": "await",
"hash_type": "hash",
"doc_type": "type",
"to_address": "to",
"from_address": "from",
"to_file": "to",
"from_file": "from",
}
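    # Illustrative example (hypothetical values): a call such as
    #   _format_command("container list", rpc_endpoint="s01.frostfs.devenv:8080", json_mode=True)
    # renders roughly to:
    #   <cli_exec_path> <base params> container list --rpc-endpoint 's01.frostfs.devenv:8080' --json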
def __init__(self, shell: Shell, cli_exec_path: str, **base_params):
self.shell = shell
self.cli_exec_path = cli_exec_path
self.__base_params = " ".join([f"--{param} {value}" for param, value in base_params.items() if value])
def _format_command(self, command: str, **params) -> str:
param_str = []
for param, value in params.items():
if param == "post_data":
param_str.append(value)
continue
if param in self.map_params.keys():
param = self.map_params[param]
param = param.replace("_", "-")
if not value:
continue
if isinstance(value, bool):
param_str.append(f"--{param}")
elif isinstance(value, int):
param_str.append(f"--{param} {value}")
elif isinstance(value, list):
for value_item in value:
val_str = str(value_item).replace("'", "\\'")
param_str.append(f"--{param} '{val_str}'")
elif isinstance(value, dict):
param_str.append(f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\'')
else:
if "'" in str(value):
value_str = str(value).replace('"', '\\"')
param_str.append(f'--{param} "{value_str}"')
else:
param_str.append(f"--{param} '{value}'")
param_str = " ".join(param_str)
return f"{self.cli_exec_path} {self.__base_params} {command or ''} {param_str}"
def _execute(self, command: Optional[str], **params) -> CommandResult:
if timeout := params.get("timeout"):
timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY
return self.shell.exec(
self._format_command(command, **params),
CommandOptions(timeout=timeout),
)
def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult:
if timeout := params.get("timeout"):
timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY
return self.shell.exec(
self._format_command(command, **params),
CommandOptions(
interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)],
timeout=timeout,
),
)

View file

@ -1 +0,0 @@
from frostfs_testlib.cli.frostfs_adm.adm import FrostfsAdm

View file

@ -1,25 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.frostfs_adm.config import FrostfsAdmConfig
from frostfs_testlib.cli.frostfs_adm.morph import FrostfsAdmMorph
from frostfs_testlib.cli.frostfs_adm.storage_config import FrostfsAdmStorageConfig
from frostfs_testlib.cli.frostfs_adm.subnet import FrostfsAdmMorphSubnet
from frostfs_testlib.cli.frostfs_adm.version import FrostfsAdmVersion
from frostfs_testlib.shell import Shell
class FrostfsAdm:
morph: FrostfsAdmMorph
subnet: FrostfsAdmMorphSubnet
storage_config: FrostfsAdmStorageConfig
version: FrostfsAdmVersion
config: FrostfsAdmConfig
def __init__(self, shell: Shell, frostfs_adm_exec_path: str, config_file: Optional[str] = None):
self.config = FrostfsAdmConfig(shell, frostfs_adm_exec_path, config=config_file)
self.morph = FrostfsAdmMorph(shell, frostfs_adm_exec_path, config=config_file)
self.subnet = FrostfsAdmMorphSubnet(shell, frostfs_adm_exec_path, config=config_file)
self.storage_config = FrostfsAdmStorageConfig(
shell, frostfs_adm_exec_path, config=config_file
)
self.version = FrostfsAdmVersion(shell, frostfs_adm_exec_path, config=config_file)

View file

@ -1,22 +0,0 @@
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsAdmConfig(CliCommand):
def init(self, path: str = "~/.frostfs/adm/config.yml") -> CommandResult:
"""Initialize basic frostfs-adm configuration file.
Args:
path: Path to config (default ~/.frostfs/adm/config.yml).
Returns:
Command's result.
"""
return self._execute(
"config init",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)

View file

@ -1,488 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsAdmMorph(CliCommand):
def deposit_notary(
self,
rpc_endpoint: str,
account: str,
gas: str,
storage_wallet: Optional[str] = None,
till: Optional[str] = None,
) -> CommandResult:
"""Deposit GAS for notary service.
Args:
account: Wallet account address.
gas: Amount of GAS to deposit.
rpc_endpoint: N3 RPC node endpoint.
storage_wallet: Path to storage node wallet.
till: Notary deposit duration in blocks.
Returns:
Command's result.
"""
return self._execute(
"morph deposit-notary",
**{param: param_value for param, param_value in locals().items() if param not in ["self"]},
)
def dump_balances(
self,
rpc_endpoint: str,
alphabet: Optional[str] = None,
proxy: Optional[str] = None,
script_hash: Optional[str] = None,
storage: Optional[str] = None,
) -> CommandResult:
"""Dump GAS balances.
Args:
alphabet: Dump balances of alphabet contracts.
proxy: Dump balances of the proxy contract.
rpc_endpoint: N3 RPC node endpoint.
script_hash: Use script-hash format for addresses.
storage: Dump balances of storage nodes from the current netmap.
Returns:
Command's result.
"""
return self._execute(
"morph dump-balances",
**{param: param_value for param, param_value in locals().items() if param not in ["self"]},
)
def dump_config(self, rpc_endpoint: str) -> CommandResult:
"""Section for morph network configuration commands.
Args:
rpc_endpoint: N3 RPC node endpoint
Returns:
Command's result.
"""
return self._execute(
"morph dump-config",
**{param: param_value for param, param_value in locals().items() if param not in ["self"]},
)
def set_config(self, set_key_value: str, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None) -> CommandResult:
"""Add/update global config value in the FrostFS network.
Args:
set_key_value: key1=val1 [key2=val2 ...]
alphabet_wallets: Path to alphabet wallets dir
rpc_endpoint: N3 RPC node endpoint
Returns:
Command's result.
"""
return self._execute(
f"morph set-config {set_key_value}",
**{param: param_value for param, param_value in locals().items() if param not in ["self", "set_key_value"]},
)
def dump_containers(
self,
rpc_endpoint: str,
cid: Optional[str] = None,
container_contract: Optional[str] = None,
dump: str = "./testlib_dump_container",
) -> CommandResult:
"""Dump FrostFS containers to file.
Args:
cid: Containers to dump.
container_contract: Container contract hash (for networks without NNS).
dump: File where to save dumped containers (default: ./testlib_dump_container).
rpc_endpoint: N3 RPC node endpoint.
Returns:
Command's result.
"""
return self._execute(
"morph dump-containers",
**{param: param_value for param, param_value in locals().items() if param not in ["self"]},
)
def dump_hashes(self, rpc_endpoint: str, domain: Optional[str] = None) -> CommandResult:
"""Dump deployed contract hashes.
Args:
rpc_endpoint: N3 RPC node endpoint.
Returns:
Command's result.
"""
return self._execute(
"morph dump-hashes",
**{param: param_value for param, param_value in locals().items() if param not in ["self"]},
)
def force_new_epoch(
self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None, delta: Optional[int] = None
) -> CommandResult:
"""Create new FrostFS epoch event in the side chain.
Args:
alphabet_wallets: Path to alphabet wallets dir.
rpc_endpoint: N3 RPC node endpoint.
Returns:
Command's result.
"""
return self._execute(
"morph force-new-epoch",
**{param: param_value for param, param_value in locals().items() if param not in ["self"]},
)
def generate_alphabet(
self,
rpc_endpoint: str,
alphabet_wallets: str,
size: int = 7,
) -> CommandResult:
"""Generate alphabet wallets for consensus nodes of the morph network.
Args:
alphabet_wallets: Path to alphabet wallets dir.
size: Amount of alphabet wallets to generate (default 7).
rpc_endpoint: N3 RPC node endpoint.
Returns:
Command's result.
"""
return self._execute(
"morph generate-alphabet",
**{param: param_value for param, param_value in locals().items() if param not in ["self"]},
)
def generate_storage_wallet(
self,
rpc_endpoint: str,
alphabet_wallets: str,
storage_wallet: str,
initial_gas: Optional[str] = None,
) -> CommandResult:
"""Generate storage node wallet for the morph network.
Args:
alphabet_wallets: Path to alphabet wallets dir.
initial_gas: Initial amount of GAS to transfer.
rpc_endpoint: N3 RPC node endpoint.
storage_wallet: Path to new storage node wallet.
Returns:
Command's result.
"""
return self._execute(
"morph generate-storage-wallet",
**{param: param_value for param, param_value in locals().items() if param not in ["self"]},
)
def init(
self,
rpc_endpoint: str,
alphabet_wallets: str,
contracts: str,
protocol: str,
container_alias_fee: int = 500,
container_fee: int = 1000,
epoch_duration: int = 240,
homomorphic_disabled: bool = False,
local_dump: Optional[str] = None,
max_object_size: int = 67108864,
) -> CommandResult:
"""Section for morph network configuration commands.
Args:
alphabet_wallets: Path to alphabet wallets dir.
container_alias_fee: Container alias fee (default 500).
container_fee: Container registration fee (default 1000).
contracts: Path to archive with compiled FrostFS contracts
(default fetched from latest git release).
epoch_duration: Amount of side chain blocks in one FrostFS epoch (default 240).
homomorphic_disabled: Disable object homomorphic hashing.
local_dump: Path to the blocks dump file.
max_object_size: Max single object size in bytes (default 67108864).
protocol: Path to the consensus node configuration.
rpc_endpoint: N3 RPC node endpoint.
Returns:
Command's result.
"""
return self._execute(
"morph init",
**{param: param_value for param, param_value in locals().items() if param not in ["self"]},
)
def refill_gas(
self,
rpc_endpoint: str,
alphabet_wallets: str,
storage_wallet: str,
gas: Optional[str] = None,
) -> CommandResult:
"""Refill GAS of storage node's wallet in the morph network
Args:
alphabet_wallets: Path to alphabet wallets dir.
gas: Additional amount of GAS to transfer.
rpc_endpoint: N3 RPC node endpoint.
storage_wallet: Path to new storage node wallet.
Returns:
Command's result.
"""
return self._execute(
"morph refill-gas",
**{param: param_value for param, param_value in locals().items() if param not in ["self"]},
)
def restore_containers(
self,
rpc_endpoint: str,
alphabet_wallets: str,
cid: str,
dump: str,
) -> CommandResult:
"""Restore FrostFS containers from file.
Args:
alphabet_wallets: Path to alphabet wallets dir.
cid: Containers to restore.
dump: File to restore containers from.
rpc_endpoint: N3 RPC node endpoint.
Returns:
Command's result.
"""
return self._execute(
"morph restore-containers",
**{param: param_value for param, param_value in locals().items() if param not in ["self"]},
)
def set_policy(
self,
rpc_endpoint: str,
alphabet_wallets: str,
exec_fee_factor: Optional[int] = None,
storage_price: Optional[int] = None,
fee_per_byte: Optional[int] = None,
) -> CommandResult:
"""Set global policy values.
Args:
alphabet_wallets: Path to alphabet wallets dir.
exec_fee_factor: ExecFeeFactor=<n1>.
storage_price: StoragePrice=<n2>.
fee_per_byte: FeePerByte=<n3>.
rpc_endpoint: N3 RPC node endpoint.
Returns:
Command's result.
"""
non_param_attribute = ""
if exec_fee_factor:
non_param_attribute += f"ExecFeeFactor={exec_fee_factor} "
if storage_price:
non_param_attribute += f"StoragePrice={storage_price} "
if fee_per_byte:
non_param_attribute += f"FeePerByte={fee_per_byte} "
return self._execute(
f"morph restore-containers {non_param_attribute}",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self", "exec_fee_factor", "storage_price", "fee_per_byte"]
},
)
def update_contracts(
self,
rpc_endpoint: str,
alphabet_wallets: str,
contracts: Optional[str] = None,
) -> CommandResult:
"""Update FrostFS contracts.
Args:
alphabet_wallets: Path to alphabet wallets dir.
contracts: Path to archive with compiled FrostFS contracts
(default fetched from latest git release).
rpc_endpoint: N3 RPC node endpoint.
Returns:
Command's result.
"""
return self._execute(
"morph update-contracts",
**{param: param_value for param, param_value in locals().items() if param not in ["self"]},
)
def remove_nodes(
self, node_netmap_keys: list[str], rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None
) -> CommandResult:
"""Move node to the Offline state in the candidates list
and tick an epoch to update the netmap using frostfs-adm
Args:
node_netmap_keys: list of nodes netmap keys.
alphabet_wallets: Path to alphabet wallets dir.
rpc_endpoint: N3 RPC node endpoint.
Returns:
Command's result.
"""
if not len(node_netmap_keys):
raise AttributeError("Got empty node_netmap_keys list")
return self._execute(
f"morph remove-nodes {' '.join(node_netmap_keys)}",
**{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]},
)
def add_rule(
self,
chain_id: str,
target_name: str,
target_type: str,
rule: Optional[list[str]] = None,
path: Optional[str] = None,
chain_id_hex: Optional[bool] = None,
chain_name: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
chain-id: Assign ID to the parsed chain
chain-id-hex: Flag to parse chain ID as hex
path: Path to encoded chain in JSON or binary format
rule: Rule statement
target-name: Resource name in APE resource name format
            target-type: Resource type (container/namespace)
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"morph ape add-rule-chain",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def get_rule(
self,
chain_id: str,
target_name: str,
target_type: str,
chain_id_hex: Optional[bool] = None,
chain_name: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
chain-id string Chain id
chain-id-hex Flag to parse chain ID as hex
target-name string Resource name in APE resource name format
target-type string Resource type(container/namespace)
timeout duration Timeout for an operation (default 15s)
wallet string Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"morph ape get-rule-chain",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def list_rules(
self,
target_type: str,
target_name: Optional[str] = None,
rpc_endpoint: Optional[str] = None,
chain_name: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
target-name: Resource name in APE resource name format
            target-type: Resource type (container/namespace)
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"morph ape list-rule-chains",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def remove_rule(
self,
chain_id: str,
target_name: str,
target_type: str,
all: Optional[bool] = None,
chain_name: Optional[str] = None,
chain_id_hex: Optional[bool] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
all: Remove all chains
chain-id: Assign ID to the parsed chain
chain-id-hex: Flag to parse chain ID as hex
target-name: Resource name in APE resource name format
            target-type: Resource type (container/namespace)
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"morph ape rm-rule-chain",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def get_nns_records(
self,
name: str,
type: Optional[str] = None,
rpc_endpoint: Optional[str] = None,
alphabet_wallets: Optional[str] = None,
) -> CommandResult:
"""Returns domain record of the specified type
Args:
name: Domain name
            type: Domain name service record type (A|CNAME|SOA|TXT)
rpc_endpoint: N3 RPC node endpoint
alphabet_wallets: path to alphabet wallets dir
Returns:
Command's result
"""
return self._execute(
"morph nns get-records",
**{param: value for param, value in locals().items() if param not in ["self"]},
)

View file

@ -1,23 +0,0 @@
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsAdmStorageConfig(CliCommand):
def set(self, account: str, wallet: str) -> CommandResult:
"""Initialize basic frostfs-adm configuration file.
Args:
account: Wallet account.
wallet: Path to wallet.
Returns:
Command's result.
"""
return self._execute(
"storage-config",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)

View file

@ -1,239 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsAdmMorphSubnet(CliCommand):
def create(
self, rpc_endpoint: str, address: str, wallet: str, notary: bool = False
) -> CommandResult:
"""Create FrostFS subnet.
Args:
address: Address in the wallet, optional.
notary: Flag to create subnet in notary environment.
rpc_endpoint: N3 RPC node endpoint.
wallet: Path to file with wallet.
Returns:
Command's result.
"""
return self._execute(
"morph subnet create",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def get(self, rpc_endpoint: str, subnet: str) -> CommandResult:
"""Read information about the FrostFS subnet.
Args:
rpc_endpoint: N3 RPC node endpoint.
subnet: ID of the subnet to read.
Returns:
Command's result.
"""
return self._execute(
"morph subnet get",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def remove(
self, rpc_endpoint: str, wallet: str, subnet: str, address: Optional[str] = None
) -> CommandResult:
"""Remove FrostFS subnet.
Args:
address: Address in the wallet, optional.
rpc_endpoint: N3 RPC node endpoint.
subnet: ID of the subnet to read.
wallet: Path to file with wallet.
Returns:
Command's result.
"""
return self._execute(
"morph subnet remove",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def admin_add(
self,
rpc_endpoint: str,
wallet: str,
admin: str,
subnet: str,
client: Optional[str] = None,
group: Optional[str] = None,
address: Optional[str] = None,
) -> CommandResult:
"""Add admin to the FrostFS subnet.
Args:
address: Address in the wallet, optional.
admin: Hex-encoded public key of the admin.
client: Add client admin instead of node one.
group: Client group ID in text format (needed with --client only).
rpc_endpoint: N3 RPC node endpoint.
subnet: ID of the subnet to read.
wallet: Path to file with wallet.
Returns:
Command's result.
"""
return self._execute(
"morph subnet admin add",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def admin_remove(
self,
rpc_endpoint: str,
wallet: str,
admin: str,
subnet: str,
client: Optional[str] = None,
address: Optional[str] = None,
) -> CommandResult:
"""Remove admin of the FrostFS subnet.
Args:
address: Address in the wallet, optional.
admin: Hex-encoded public key of the admin.
client: Remove client admin instead of node one.
rpc_endpoint: N3 RPC node endpoint.
subnet: ID of the subnet to read.
wallet: Path to file with wallet.
Returns:
Command's result.
"""
return self._execute(
"morph subnet admin remove",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def client_add(
self,
rpc_endpoint: str,
wallet: str,
subnet: str,
client: Optional[str] = None,
group: Optional[str] = None,
address: Optional[str] = None,
) -> CommandResult:
"""Add client to the FrostFS subnet.
Args:
address: Address in the wallet, optional.
client: Add client admin instead of node one.
group: Client group ID in text format (needed with --client only).
rpc_endpoint: N3 RPC node endpoint.
subnet: ID of the subnet to read.
wallet: Path to file with wallet.
Returns:
Command's result.
"""
return self._execute(
"morph subnet client add",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def client_remove(
self,
rpc_endpoint: str,
wallet: str,
client: str,
group: str,
subnet: str,
address: Optional[str] = None,
) -> CommandResult:
"""Remove client of the FrostFS subnet.
Args:
address: Address in the wallet, optional.
client: Remove client admin instead of node one.
group: ID of the client group to work with.
rpc_endpoint: N3 RPC node endpoint.
subnet: ID of the subnet to read.
wallet: Path to file with wallet.
Returns:
Command's result.
"""
return self._execute(
"morph subnet client remove",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def node_add(self, rpc_endpoint: str, wallet: str, node: str, subnet: str) -> CommandResult:
"""Add node to the FrostFS subnet.
Args:
node: Hex-encoded public key of the node.
rpc_endpoint: N3 RPC node endpoint.
subnet: ID of the subnet to read.
wallet: Path to file with wallet.
Returns:
Command's result.
"""
return self._execute(
"morph subnet node add",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def node_remove(self, rpc_endpoint: str, wallet: str, node: str, subnet: str) -> CommandResult:
"""Remove node from the FrostFS subnet.
Args:
node: Hex-encoded public key of the node.
rpc_endpoint: N3 RPC node endpoint.
subnet: ID of the subnet to read.
wallet: Path to file with wallet.
Returns:
Command's result.
"""
return self._execute(
"morph subnet node remove",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)

View file

@ -1,12 +0,0 @@
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsAdmVersion(CliCommand):
def get(self) -> CommandResult:
"""Application version
Returns:
Command's result.
"""
return self._execute("", version=True)

View file

@ -1 +0,0 @@
from frostfs_testlib.cli.frostfs_authmate.authmate import FrostfsAuthmate

View file

@ -1,14 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.frostfs_authmate.secret import FrostfsAuthmateSecret
from frostfs_testlib.cli.frostfs_authmate.version import FrostfsAuthmateVersion
from frostfs_testlib.shell import Shell
class FrostfsAuthmate:
secret: FrostfsAuthmateSecret
version: FrostfsAuthmateVersion
def __init__(self, shell: Shell, frostfs_authmate_exec_path: str):
self.secret = FrostfsAuthmateSecret(shell, frostfs_authmate_exec_path)
self.version = FrostfsAuthmateVersion(shell, frostfs_authmate_exec_path)

View file

@ -1,90 +0,0 @@
from typing import Optional, Union
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsAuthmateSecret(CliCommand):
def obtain(
self,
wallet: str,
wallet_password: str,
peer: str,
gate_wallet: str,
access_key_id: str,
address: Optional[str] = None,
gate_address: Optional[str] = None,
) -> CommandResult:
"""Obtain a secret from FrostFS network.
Args:
wallet: Path to the wallet.
wallet_password: Wallet password.
address: Address of wallet account.
peer: Address of frostfs peer to connect to.
            gate_wallet: Path to the gate wallet.
            gate_address: Address of the gate wallet account.
access_key_id: Access key id for s3.
Returns:
Command's result.
"""
return self._execute_with_password(
"obtain-secret",
wallet_password,
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def issue(
self,
wallet: str,
wallet_password: str,
peer: str,
gate_public_key: Union[str, list[str]],
address: Optional[str] = None,
container_id: Optional[str] = None,
container_friendly_name: Optional[str] = None,
container_placement_policy: Optional[str] = None,
session_tokens: Optional[str] = None,
lifetime: Optional[str] = None,
container_policy: Optional[str] = None,
aws_cli_credentials: Optional[str] = None,
) -> CommandResult:
"""Obtain a secret from FrostFS network
Args:
wallet: Path to the wallet.
wallet_password: Wallet password.
address: Address of wallet account.
peer: Address of a frostfs peer to connect to.
gate_public_key: Public 256r1 key of a gate (send list[str] of keys to use multiple gates).
container_id: Auth container id to put the secret into.
container_friendly_name: Friendly name of auth container to put the secret into.
container_placement_policy: Placement policy of auth container to put the secret into
(default: "REP 2 IN X CBF 3 SELECT 2 FROM * AS X").
session_tokens: Create session tokens with rules, if the rules are set as 'none', no
session tokens will be created.
lifetime: Lifetime of tokens. For example 50h30m (note: max time unit is an hour so to
set a day you should use 24h). It will be ceil rounded to the nearest amount of
epoch. (default: 720h0m0s).
container_policy: Mapping AWS storage class to FrostFS storage policy as plain json string
or path to json file.
aws_cli_credentials: Path to the aws cli credential file.
Returns:
Command's result.
"""
return self._execute_with_password(
"issue-secret",
wallet_password,
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)

View file

@ -1,12 +0,0 @@
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsAuthmateVersion(CliCommand):
def get(self) -> CommandResult:
"""Application version
Returns:
Command's result.
"""
return self._execute("", version=True)

View file

@ -1 +0,0 @@
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli

View file

@ -1,30 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsCliAccounting(CliCommand):
def balance(
self,
wallet: Optional[str] = None,
rpc_endpoint: Optional[str] = None,
address: Optional[str] = None,
owner: Optional[str] = None,
) -> CommandResult:
"""Get internal balance of FrostFS account
Args:
address: Address of wallet account.
owner: Owner of balance account (omit to use owner from private key).
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
wallet: WIF (NEP-2) string or path to the wallet or binary key.
Returns:
Command's result.
"""
return self._execute(
"accounting balance",
**{param: value for param, value in locals().items() if param not in ["self"]},
)

View file

@ -1,52 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsCliACL(CliCommand):
def extended_create(
self, cid: str, out: str, file: Optional[str] = None, rule: Optional[list] = None
) -> CommandResult:
"""Create extended ACL from the text representation.
Rule consist of these blocks: <action> <operation> [<filter1> ...] [<target1> ...]
Action is 'allow' or 'deny'.
Operation is an object service verb: 'get', 'head', 'put', 'search', 'delete', 'getrange',
or 'getrangehash'.
Filter consists of <typ>:<key><match><value>
Typ is 'obj' for object applied filter or 'req' for request applied filter.
Key is a valid unicode string corresponding to object or request header key.
Well-known system object headers start with '$Object:' prefix.
User defined headers start without prefix.
Read more about filter keys at:
https://git.frostfs.info/TrueCloudLab/frostfs-api/src/branch/master/proto-docs/acl.md#message-eaclrecord-filter
Match is '=' for matching and '!=' for non-matching filter.
Value is a valid unicode string corresponding to object or request header value.
Target is
'user' for container owner,
'system' for Storage nodes in container and Inner Ring nodes,
'others' for all other request senders,
'pubkey:<key1>,<key2>,...' for exact request sender, where <key> is a hex-encoded 33-byte
public key.
When both '--rule' and '--file' arguments are used, '--rule' records will be placed higher
in resulting extended ACL table.
Args:
cid: Container ID.
            file: Read list of extended ACL table records from text file.
out: Save JSON formatted extended ACL table in file.
rule: Extended ACL table record to apply.
Returns:
Command's result.
"""
return self._execute(
"acl extended create",
**{param: value for param, value in locals().items() if param not in ["self"]},
)

View file

@ -1,70 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsCliApeManager(CliCommand):
"""Operations with APE manager."""
def add(
self,
rpc_endpoint: str,
chain_id: Optional[str] = None,
chain_id_hex: Optional[str] = None,
path: Optional[str] = None,
rule: Optional[str] | Optional[list[str]] = None,
target_name: Optional[str] = None,
target_type: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Add rule chain for a target."""
return self._execute(
"ape-manager add",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def list(
self,
rpc_endpoint: str,
target_name: Optional[str] = None,
target_type: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Generate APE override by target and APE chains. Util command.
Generated APE override can be dumped to a file in JSON format that is passed to
"create" command.
"""
return self._execute(
"ape-manager list",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def remove(
self,
rpc_endpoint: str,
chain_id: Optional[str] = None,
chain_id_hex: Optional[str] = None,
target_name: Optional[str] = None,
target_type: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Generate APE override by target and APE chains. Util command.
Generated APE override can be dumped to a file in JSON format that is passed to
"create" command.
"""
return self._execute(
"ape-manager remove",
**{param: value for param, value in locals().items() if param not in ["self"]},
)

View file

@ -1,54 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsCliBearer(CliCommand):
def create(
self,
rpc_endpoint: str,
out: str,
issued_at: Optional[str] = None,
expire_at: Optional[str] = None,
not_valid_before: Optional[str] = None,
ape: Optional[str] = None,
eacl: Optional[str] = None,
owner: Optional[str] = None,
json: Optional[bool] = False,
impersonate: Optional[bool] = False,
wallet: Optional[str] = None,
address: Optional[str] = None,
) -> CommandResult:
"""Create bearer token.
All epoch flags can be specified relative to the current epoch with the +n syntax.
In this case --rpc-endpoint flag should be specified and the epoch in bearer token
is set to current epoch + n.
"""
return self._execute(
"bearer create",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def generate_ape_override(
self,
chain_id: Optional[str] = None,
chain_id_hex: Optional[str] = None,
cid: Optional[str] = None,
output: Optional[str] = None,
path: Optional[str] = None,
rule: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
) -> CommandResult:
"""Generate APE override by target and APE chains. Util command.
Generated APE override can be dumped to a file in JSON format that is passed to
"create" command.
"""
return self._execute(
"bearer generate-ape-override",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
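The create docstring above allows the +n relative-epoch syntax; a sketch of that form, reusing the shell handle from the previous sketch (paths and endpoint remain placeholders):

bearer = FrostfsCliBearer(shell, "/usr/bin/frostfs-cli")
bearer.create(
    rpc_endpoint="s01.frostfs.devenv:8080",  # required when using the relative form
    out="/tmp/bearer.json",
    expire_at="+10",  # resolved to current epoch + 10 via the RPC endpoint
    json=True,
)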

View file

@ -1,47 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.frostfs_cli.accounting import FrostfsCliAccounting
from frostfs_testlib.cli.frostfs_cli.acl import FrostfsCliACL
from frostfs_testlib.cli.frostfs_cli.ape_manager import FrostfsCliApeManager
from frostfs_testlib.cli.frostfs_cli.bearer import FrostfsCliBearer
from frostfs_testlib.cli.frostfs_cli.container import FrostfsCliContainer
from frostfs_testlib.cli.frostfs_cli.control import FrostfsCliControl
from frostfs_testlib.cli.frostfs_cli.netmap import FrostfsCliNetmap
from frostfs_testlib.cli.frostfs_cli.object import FrostfsCliObject
from frostfs_testlib.cli.frostfs_cli.session import FrostfsCliSession
from frostfs_testlib.cli.frostfs_cli.shards import FrostfsCliShards
from frostfs_testlib.cli.frostfs_cli.storagegroup import FrostfsCliStorageGroup
from frostfs_testlib.cli.frostfs_cli.tree import FrostfsCliTree
from frostfs_testlib.cli.frostfs_cli.util import FrostfsCliUtil
from frostfs_testlib.cli.frostfs_cli.version import FrostfsCliVersion
from frostfs_testlib.shell import Shell
class FrostfsCli:
accounting: FrostfsCliAccounting
acl: FrostfsCliACL
container: FrostfsCliContainer
netmap: FrostfsCliNetmap
object: FrostfsCliObject
session: FrostfsCliSession
shards: FrostfsCliShards
storagegroup: FrostfsCliStorageGroup
util: FrostfsCliUtil
version: FrostfsCliVersion
control: FrostfsCliControl
tree: FrostfsCliTree
bearer: FrostfsCliBearer
ape_manager: FrostfsCliApeManager
def __init__(self, shell: Shell, frostfs_cli_exec_path: str, config_file: Optional[str] = None):
self.accounting = FrostfsCliAccounting(shell, frostfs_cli_exec_path, config=config_file)
self.acl = FrostfsCliACL(shell, frostfs_cli_exec_path, config=config_file)
self.container = FrostfsCliContainer(shell, frostfs_cli_exec_path, config=config_file)
self.netmap = FrostfsCliNetmap(shell, frostfs_cli_exec_path, config=config_file)
self.object = FrostfsCliObject(shell, frostfs_cli_exec_path, config=config_file)
self.session = FrostfsCliSession(shell, frostfs_cli_exec_path, config=config_file)
self.shards = FrostfsCliShards(shell, frostfs_cli_exec_path, config=config_file)
self.storagegroup = FrostfsCliStorageGroup(shell, frostfs_cli_exec_path, config=config_file)
self.util = FrostfsCliUtil(shell, frostfs_cli_exec_path, config=config_file)
self.version = FrostfsCliVersion(shell, frostfs_cli_exec_path, config=config_file)
self.tree = FrostfsCliTree(shell, frostfs_cli_exec_path, config=config_file)
self.control = FrostfsCliControl(shell, frostfs_cli_exec_path, config=config_file)
self.bearer = FrostfsCliBearer(shell, frostfs_cli_exec_path, config=config_file)
self.ape_manager = FrostfsCliApeManager(shell, frostfs_cli_exec_path, config=config_file)
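A sketch of wiring the facade together and issuing a first command; paths are placeholders, and CommandResult is assumed to expose the captured stdout:

cli = FrostfsCli(shell, "/usr/bin/frostfs-cli", config_file="/etc/frostfs/cli/config.yml")
result = cli.version.get()
print(result.stdout)  # assumed: CommandResult carries the captured stdout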

View file

@ -1,332 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsCliContainer(CliCommand):
def create(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
nns_zone: Optional[str] = None,
nns_name: Optional[str] = None,
address: Optional[str] = None,
attributes: Optional[dict] = None,
basic_acl: Optional[str] = None,
await_mode: bool = False,
disable_timestamp: bool = False,
force: bool = False,
trace: bool = False,
name: Optional[str] = None,
nonce: Optional[str] = None,
policy: Optional[str] = None,
session: Optional[str] = None,
subnet: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Create a new container and register it in the FrostFS.
It will be stored in the sidechain when the Inner Ring accepts it.
Args:
address: Address of wallet account.
attributes: Comma separated pairs of container attributes in form of
Key1=Value1,Key2=Value2.
await_mode: Block execution until container is persisted.
basic_acl: Hex encoded basic ACL value or keywords like 'public-read-write',
'private', 'eacl-public-read' (default "private").
disable_timestamp: Disable timestamp container attribute.
force: Skip placement validity check.
trace: Generate trace ID and print it.
name: Container name attribute.
nonce: UUIDv4 nonce value for container.
policy: QL-encoded or JSON-encoded placement policy or path to file with it.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
session: Path to a JSON-encoded container session token.
subnet: String representation of container subnetwork.
ttl: TTL value in request meta header (default 2).
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for the operation (default 15s).
nns_zone: Container nns zone attribute.
nns_name: Container nns name attribute.
Returns:
Command's result.
"""
return self._execute(
"container create",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def delete(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
await_mode: bool = False,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
force: bool = False,
trace: bool = False,
) -> CommandResult:
"""
Delete an existing container.
Only the owner of the container has permission to remove the container.
Args:
address: Address of wallet account.
await_mode: Block execution until container is removed.
cid: Container ID.
trace: Generate trace ID and print it.
force: Do not check whether container contains locks and remove immediately.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
session: Path to a JSON-encoded container session token.
ttl: TTL value in request meta header (default 2).
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
Returns:
Command's result.
"""
return self._execute(
"container delete",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def get(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
await_mode: bool = False,
to: Optional[str] = None,
json_mode: bool = False,
trace: bool = False,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Get container field info.
Args:
address: Address of wallet account.
await_mode: Block execution until container is removed.
cid: Container ID.
json_mode: Print or dump container in JSON format.
trace: Generate trace ID and print it.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
to: Path to dump encoded container.
ttl: TTL value in request meta header (default 2).
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for the operation (default 15s).
generate_key: Generate a new private key.
Returns:
Command's result.
"""
return self._execute(
"container get",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def get_eacl(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
await_mode: bool = False,
to: Optional[str] = None,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Get extended ACL table of container.
Args:
address: Address of wallet account.
await_mode: Block execution until container is removed.
cid: Container ID.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
to: Path to dump encoded container.
session: Path to a JSON-encoded container session token.
ttl: TTL value in request meta header (default 2).
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for the operation (default 15s).
generate_key: Generate a new private key.
Returns:
Command's result.
"""
return self._execute(
"container get-eacl",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def list(
self,
rpc_endpoint: str,
name: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
owner: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
**params,
) -> CommandResult:
"""
List all created containers.
Args:
address: Address of wallet account.
name: List containers by the attribute name.
owner: Owner of containers (omit to use owner from private key).
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
ttl: TTL value in request meta header (default 2).
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
trace: Generate trace ID and print it.
timeout: Timeout for the operation (default 15s).
generate_key: Generate a new private key.
Returns:
Command's result.
"""
return self._execute(
"container list",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def list_objects(
self,
rpc_endpoint: str,
cid: str,
bearer: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
trace: bool = False,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
List existing objects in container.
Args:
address: Address of wallet account.
cid: Container ID.
bearer: File with signed JSON or binary encoded bearer token.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
ttl: TTL value in request meta header (default 2).
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
trace: Generate trace ID and print it.
timeout: Timeout for the operation (default 15s).
generate_key: Generate a new private key.
Returns:
Command's result.
"""
return self._execute(
"container list-objects",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
# TODO: Deprecated since 0.42
def set_eacl(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
await_mode: bool = False,
table: Optional[str] = None,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Set a new extended ACL table for the container.
Container ID in the EACL table will be substituted with the ID from the CLI.
Args:
address: Address of wallet account.
await_mode: Block execution until the new extended ACL table is persisted.
cid: Container ID.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
session: Path to a JSON-encoded container session token.
table: Path to file with JSON or binary encoded EACL table.
ttl: TTL value in request meta header (default 2).
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for the operation (default 15s).
Returns:
Command's result.
"""
return self._execute(
"container set-eacl",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def search_node(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
ttl: Optional[int] = None,
from_file: Optional[str] = None,
trace: bool = False,
short: Optional[bool] = True,
xhdr: Optional[dict] = None,
generate_key: Optional[bool] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Show the nodes participating in the container in the current epoch.
Args:
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
wallet: WIF (NEP-2) string or path to the wallet or binary key.
cid: Container ID.
address: Address of wallet account.
ttl: TTL value in request meta header (default 2).
from_file: File path with encoded container.
timeout: Timeout for the operation (default 15s).
short: Shorten the output of node information.
trace: Generate trace ID and print it.
xhdr: Dict with request X-Headers.
generate_key: Generate a new private key.
Returns:
Command's result.
"""
from_str = f"--from {from_file}" if from_file else ""
return self._execute(
f"container nodes {from_str}",
**{param: value for param, value in locals().items() if param not in ["self", "from_file", "from_str"]},
)
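A container-creation sketch over the documented parameters; the placement policy string follows the QL form mentioned in the create docstring, and every value is a placeholder:

container = FrostfsCliContainer(shell, "/usr/bin/frostfs-cli")
container.create(
    rpc_endpoint="s01.frostfs.devenv:8080",
    policy="REP 2 IN X CBF 1 SELECT 2 FROM * AS X",  # QL-encoded placement policy
    basic_acl="public-read-write",
    name="demo-container",
    await_mode=True,  # block until the container is persisted
)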

View file

@ -1,232 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsCliControl(CliCommand):
def set_status(
self,
endpoint: str,
status: str,
wallet: Optional[str] = None,
force: Optional[bool] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Set status of the storage node in FrostFS network map
Args:
wallet: Path to the wallet or binary key
address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
force: Force turning to local maintenance
status: New netmap status keyword ('online', 'offline', 'maintenance')
timeout: Timeout for an operation (default 15s)
Returns:
Command's result.
"""
return self._execute(
"control set-status",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def healthcheck(
self,
endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Health check for FrostFS storage nodes
Args:
wallet: Path to the wallet or binary key
address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
timeout: Timeout for an operation (default 15s)
Returns:
Command's result.
"""
return self._execute(
"control healthcheck",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def drop_objects(
self,
endpoint: str,
objects: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
wallet: Path to the wallet or binary key
address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
objects: List of object addresses to be removed in string format
timeout: Timeout for an operation (default 15s)
Returns:
Command's result.
"""
return self._execute(
"control drop-objects",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def add_rule(
self,
endpoint: str,
chain_id: str,
target_name: str,
target_type: str,
rule: Optional[list[str]] = None,
path: Optional[str] = None,
chain_id_hex: Optional[bool] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address: Address of wallet account
chain-id: Assign ID to the parsed chain
chain-id-hex: Flag to parse chain ID as hex
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
path: Path to encoded chain in JSON or binary format
rule: Rule statement
target-name: Resource name in APE resource name format
target-type: Resource type(container/namespace)
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command's result.
"""
return self._execute(
"control add-rule",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def get_rule(
self,
endpoint: str,
chain_id: str,
target_name: str,
target_type: str,
chain_id_hex: Optional[bool] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address string Address of wallet account
chain-id string Chain id
chain-id-hex Flag to parse chain ID as hex
endpoint string Remote node control address (as 'multiaddr' or '<host>:<port>')
target-name string Resource name in APE resource name format
target-type string Resource type(container/namespace)
timeout duration Timeout for an operation (default 15s)
wallet string Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"control get-rule",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def list_rules(
self,
endpoint: str,
target_name: str,
target_type: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
target-name: Resource name in APE resource name format
target-type: Resource type(container/namespace)
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command's result.
"""
return self._execute(
"control list-rules",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def list_targets(
self,
endpoint: str,
chain_name: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address: Address of wallet account
chain-name: Chain name(ingress|s3)
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command's result.
"""
return self._execute(
"control list-targets",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def remove_rule(
self,
endpoint: str,
chain_id: str,
target_name: str,
target_type: str,
all: Optional[bool] = None,
chain_id_hex: Optional[bool] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address: Address of wallet account
all: Remove all chains
chain-id: Assign ID to the parsed chain
chain-id-hex: Flag to parse chain ID as hex
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
target-name: Resource name in APE resource name format
target-type: Resource type(container/namespace)
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command's result.
"""
return self._execute(
"control remove-rule",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
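A sketch of managing APE rules over the node's control endpoint (control commands take the control address, not the data one); the rule grammar shown is an assumption:

control = FrostfsCliControl(shell, "/usr/bin/frostfs-cli")
control.add_rule(
    endpoint="s01.frostfs.devenv:8081",  # control endpoint
    chain_id="allowGet",
    target_type="container",
    target_name="<container-id>",
    rule=["allow Object.Get *"],  # placeholder rule statement
)
control.list_rules(
    endpoint="s01.frostfs.devenv:8081",
    target_type="container",
    target_name="<container-id>",
)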

View file

@ -1,132 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsCliNetmap(CliCommand):
def epoch(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = False,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Get current epoch number.
Args:
address: Address of wallet account.
generate_key: Generate new private key.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
ttl: TTL value in request meta header (default 2).
wallet: Path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for an operation (default 15s).
Returns:
Command's result.
"""
return self._execute(
"netmap epoch",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def netinfo(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = False,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Get information about FrostFS network.
Args:
address: Address of wallet account
generate_key: Generate new private key
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>')
ttl: TTL value in request meta header (default 2)
wallet: Path to the wallet or binary key
xhdr: Request X-Headers in form of Key=Value
timeout: Timeout for an operation (default 15s).
Returns:
Command's result.
"""
return self._execute(
"netmap netinfo",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def nodeinfo(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
json: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = False,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Get target node info.
Args:
address: Address of wallet account.
generate_key: Generate new private key.
json: Print node info in JSON format.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
ttl: TTL value in request meta header (default 2).
wallet: Path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for an operation (default 15s).
Returns:
Command's result.
"""
return self._execute(
"netmap nodeinfo",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def snapshot(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = False,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Request current local snapshot of the network map.
Args:
address: Address of wallet account.
generate_key: Generate new private key.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
ttl: TTL value in request meta header (default 2).
wallet: Path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for an operation (default 15s).
Returns:
Command's result.
"""
return self._execute(
"netmap snapshot",
**{param: value for param, value in locals().items() if param not in ["self"]},
)

View file

@ -1,458 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsCliObject(CliCommand):
def delete(
self,
rpc_endpoint: str,
cid: str,
oid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Delete object from FrostFS.
Args:
address: Address of wallet account.
bearer: File with signed JSON or binary encoded bearer token.
cid: Container ID.
generate_key: Generate new private key.
oid: Object ID.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
session: Filepath to a JSON- or binary-encoded token of the object DELETE session.
ttl: TTL value in request meta header (default 2).
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for the operation (default 15s).
Returns:
Command's result.
"""
return self._execute(
"object delete",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def get(
self,
rpc_endpoint: str,
cid: str,
oid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
file: Optional[str] = None,
header: Optional[str] = None,
no_progress: bool = False,
raw: bool = False,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Get object from FrostFS.
Args:
address: Address of wallet account.
bearer: File with signed JSON or binary encoded bearer token.
cid: Container ID.
file: File to write object payload to. Default: stdout.
generate_key: Generate new private key.
header: File to write header to. Default: stdout.
no_progress: Do not show progress bar.
oid: Object ID.
raw: Set raw request option.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
session: Filepath to a JSON- or binary-encoded token of the object GET session.
ttl: TTL value in request meta header (default 2).
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for the operation (default 15s).
Returns:
Command's result.
"""
return self._execute(
"object get",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def hash(
self,
rpc_endpoint: str,
cid: str,
oid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
range: Optional[str] = None,
salt: Optional[str] = None,
ttl: Optional[int] = None,
session: Optional[str] = None,
hash_type: Optional[str] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Get object hash.
Args:
address: Address of wallet account.
bearer: File with signed JSON or binary encoded bearer token.
cid: Container ID.
generate_key: Generate new private key.
oid: Object ID.
range: Range to take hash from in the form offset1:length1,...
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
salt: Salt in hex format.
ttl: TTL value in request meta header (default 2).
session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session.
hash_type: Hash type. Either 'sha256' or 'tz' (default "sha256").
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for the operation (default 15s).
Returns:
Command's result.
"""
return self._execute(
"object hash",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def head(
self,
rpc_endpoint: str,
cid: str,
oid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
file: Optional[str] = None,
json_mode: bool = False,
main_only: bool = False,
proto: bool = False,
raw: bool = False,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Get object header.
Args:
address: Address of wallet account.
bearer: File with signed JSON or binary encoded bearer token.
cid: Container ID.
file: File to write object payload to. Default: stdout.
generate_key: Generate new private key.
json_mode: Marshal output in JSON.
main_only: Return only main fields.
oid: Object ID.
proto: Marshal output in Protobuf.
raw: Set raw request option.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
session: Filepath to a JSON- or binary-encoded token of the object HEAD session.
ttl: TTL value in request meta header (default 2).
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for the operation (default 15s).
Returns:
Command's result.
"""
return self._execute(
"object head",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def lock(
self,
rpc_endpoint: str,
cid: str,
oid: str,
wallet: Optional[str] = None,
lifetime: Optional[int] = None,
expire_at: Optional[int] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Lock object in container.
Args:
address: Address of wallet account.
bearer: File with signed JSON or binary encoded bearer token.
cid: Container ID.
generate_key: Generate new private key.
oid: Object ID.
lifetime: Lock lifetime.
expire_at: Lock expiration epoch.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
session: Filepath to a JSON- or binary-encoded token of the object PUT session.
ttl: TTL value in request meta header (default 2).
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for the operation (default 15s).
Returns:
Command's result.
"""
return self._execute(
"object lock",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def put(
self,
rpc_endpoint: str,
cid: str,
file: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
attributes: Optional[dict] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
copies_number: Optional[int] = None,
disable_filename: bool = False,
disable_timestamp: bool = False,
expire_at: Optional[int] = None,
no_progress: bool = False,
notify: Optional[str] = None,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Put object to FrostFS.
Args:
address: Address of wallet account.
attributes: User attributes in form of Key1=Value1,Key2=Value2.
bearer: File with signed JSON or binary encoded bearer token.
copies_number: Number of copies of the object to store within the RPC call.
cid: Container ID.
disable_filename: Do not set well-known filename attribute.
disable_timestamp: Do not set well-known timestamp attribute.
expire_at: Last epoch in the life of the object.
file: File with object payload.
generate_key: Generate new private key.
no_progress: Do not show progress bar.
notify: Object notification in the form of *epoch*:*topic*; '-'
topic means using default.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
session: Filepath to a JSON- or binary-encoded token of the object PUT session.
ttl: TTL value in request meta header (default 2).
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for the operation (default 15s).
Returns:
Command's result.
"""
return self._execute(
"object put",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def patch(
self,
rpc_endpoint: str,
cid: str,
oid: str,
range: Optional[list[str]] = None,
payload: Optional[list[str]] = None,
new_attrs: Optional[str] = None,
replace_attrs: bool = False,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
session: Optional[str] = None,
timeout: Optional[str] = None,
trace: bool = False,
ttl: Optional[int] = None,
wallet: Optional[str] = None,
xhdr: Optional[dict] = None,
) -> CommandResult:
"""
PATCH an object.
Args:
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>')
cid: Container ID
oid: Object ID
range: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2]
payload: An array of file paths to be applied in each range
new_attrs: Attributes to be changed in the format Key1=Value1,Key2=Value2
replace_attrs: Replace all attributes completely with new ones specified in new_attrs
address: Address of wallet account
bearer: File with signed JSON or binary encoded bearer token
generate_key: Generate new private key
session: Filepath to a JSON- or binary-encoded token of the object RANGE session
timeout: Timeout for the operation
trace: Generate trace ID and print it
ttl: TTL value in request meta header (default 2)
wallet: WIF (NEP-2) string or path to the wallet or binary key
xhdr: Dict with request X-Headers
Returns:
Command's result.
"""
return self._execute(
"object patch",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def range(
self,
rpc_endpoint: str,
cid: str,
oid: str,
range: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
file: Optional[str] = None,
json_mode: bool = False,
raw: bool = False,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Get payload range data of an object.
Args:
address: Address of wallet account.
bearer: File with signed JSON or binary encoded bearer token.
cid: Container ID.
file: File to write object payload to. Default: stdout.
generate_key: Generate new private key.
json_mode: Marshal output in JSON.
oid: Object ID.
range: Range to take data from in the form offset:length.
raw: Set raw request option.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
session: Filepath to a JSON- or binary-encoded token of the object RANGE session.
ttl: TTL value in request meta header (default 2).
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for the operation (default 15s).
Returns:
Command's result.
"""
return self._execute(
"object range",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def search(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
filters: Optional[list] = None,
oid: Optional[str] = None,
phy: bool = False,
root: bool = False,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Search object.
Args:
address: Address of wallet account.
bearer: File with signed JSON or binary encoded bearer token.
cid: Container ID.
filters: Repeated filter expressions or files with protobuf JSON.
generate_key: Generate new private key.
oid: Object ID.
phy: Search physically stored objects.
root: Search for user objects.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
session: Filepath to a JSON- or binary-encoded token of the object SEARCH session.
ttl: TTL value in request meta header (default 2).
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for the operation (default 15s).
Returns:
Command's result.
"""
return self._execute(
"object search",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def nodes(
self,
rpc_endpoint: str,
cid: str,
oid: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = False,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Search object nodes.
Args:
address: Address of wallet account.
bearer: File with signed JSON or binary encoded bearer token.
cid: Container ID.
generate_key: Generate new private key.
oid: Object ID.
trace: Generate trace ID and print it.
root: Search for user objects.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
verify_presence_all: Verify the actual presence of the object on all netmap nodes.
ttl: TTL value in request meta header (default 2).
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for the operation (default 15s).
Returns:
Command's result.
"""
return self._execute(
"object nodes",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
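A put/get round trip under the same placeholder conventions; extracting the object ID from the put output is left implicit because its format is defined by the CLI, not this wrapper:

obj = FrostfsCliObject(shell, "/usr/bin/frostfs-cli")
put_result = obj.put(
    rpc_endpoint="s01.frostfs.devenv:8080",
    cid="<container-id>",
    file="/tmp/payload.bin",
    no_progress=True,
)
# parse the OID from put_result.stdout, then read the object back:
obj.get(
    rpc_endpoint="s01.frostfs.devenv:8080",
    cid="<container-id>",
    oid="<object-id>",
    file="/tmp/payload.copy",
)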

View file

@ -1,35 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsCliSession(CliCommand):
def create(
self,
rpc_endpoint: str,
wallet: str,
out: str,
lifetime: Optional[int] = None,
address: Optional[str] = None,
json: Optional[bool] = False,
) -> CommandResult:
"""
Create session token.
Args:
address: Address of wallet account.
out: File to write session token to.
lifetime: Number of epochs for token to stay valid.
json: Output token in JSON.
wallet: WIF (NEP-2) string or path to the wallet or binary key.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
Returns:
Command's result.
"""
return self._execute(
"session create",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
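A session-token sketch; the wallet path and endpoint are placeholders:

session = FrostfsCliSession(shell, "/usr/bin/frostfs-cli")
session.create(
    rpc_endpoint="s01.frostfs.devenv:8080",
    wallet="/path/to/wallet.json",
    out="/tmp/session.json",
    lifetime=10,  # number of epochs the token stays valid
    json=True,
)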

View file

@ -1,261 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsCliShards(CliCommand):
def flush_cache(
self,
endpoint: str,
wallet: str,
wallet_password: str,
id: Optional[list[str]],
address: Optional[str] = None,
all: bool = False,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Flush objects from the write-cache to the main storage.
Args:
address: Address of wallet account.
id: List of shard IDs in base58 encoding.
all: Process all shards.
endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
wallet: WIF (NEP-2) string or path to the wallet or binary key.
wallet_password: Wallet password.
timeout: Timeout for an operation (default 15s).
Returns:
Command's result.
"""
return self._execute_with_password(
"control shards flush-cache",
wallet_password,
**{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]},
)
def set_mode(
self,
endpoint: str,
mode: str,
id: Optional[list[str]] = None,
wallet: Optional[str] = None,
wallet_password: Optional[str] = None,
address: Optional[str] = None,
all: bool = False,
clear_errors: bool = False,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Set work mode of the shard.
Args:
address: Address of wallet account.
id: List of shard IDs in base58 encoding.
mode: New shard mode ('degraded-read-only', 'read-only', 'read-write').
all: Process all shards.
clear_errors: Set shard error count to 0.
endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
wallet: WIF (NEP-2) string or path to the wallet or binary key.
wallet_password: Wallet password.
timeout: Timeout for an operation (default 15s).
Returns:
Command's result.
"""
if not wallet_password:
return self._execute(
"control shards set-mode",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
return self._execute_with_password(
"control shards set-mode",
wallet_password,
**{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]},
)
def dump(
self,
endpoint: str,
wallet: str,
wallet_password: str,
id: str,
path: str,
address: Optional[str] = None,
no_errors: bool = False,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Dump objects from shard to a file.
Args:
address: Address of wallet account.
no_errors: Skip invalid/unreadable objects.
id: Shard ID in base58 encoding.
path: File to write objects to.
endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
wallet: WIF (NEP-2) string or path to the wallet or binary key.
wallet_password: Wallet password.
timeout: Timeout for an operation (default 15s).
Returns:
Command's result.
"""
return self._execute_with_password(
"control shards dump",
wallet_password,
**{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]},
)
def list(
self,
endpoint: str,
wallet: Optional[str] = None,
wallet_password: Optional[str] = None,
address: Optional[str] = None,
json_mode: bool = False,
timeout: Optional[str] = None,
) -> CommandResult:
"""
List shards of the storage node.
Args:
address: Address of wallet account.
json_mode: Print shard info as a JSON array.
endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
wallet: WIF (NEP-2) string or path to the wallet or binary key.
wallet_password: Wallet password.
timeout: Timeout for an operation (default 15s).
Returns:
Command's result.
"""
if not wallet_password:
return self._execute(
"control shards list",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
return self._execute_with_password(
"control shards list",
wallet_password,
**{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]},
)
def evacuation_start(
self,
endpoint: str,
id: Optional[str] = None,
scope: Optional[str] = None,
all: bool = False,
no_errors: bool = True,
await_mode: bool = False,
address: Optional[str] = None,
timeout: Optional[str] = None,
no_progress: bool = False,
) -> CommandResult:
"""
Objects evacuation from shard to other shards.
Args:
address: Address of wallet account
all: Process all shards
await_mode: Block execution until evacuation is completed
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
id: Shard ID in base58 encoding
no_errors: Skip invalid/unreadable objects (default true)
no_progress: Do not print evacuation progress while await_mode is set
scope: Evacuation scope; possible values: trees, objects, all (default "all")
timeout: Timeout for an operation (default 15s)
Returns:
Command's result.
"""
return self._execute(
"control shards evacuation start",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def evacuation_reset(
self,
endpoint: str,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Reset the status of object evacuation from shard to other shards.
Args:
address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
timeout: Timeout for an operation (default 15s)
Returns:
Command's result.
"""
return self._execute(
"control shards evacuation reset",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def evacuation_stop(
self,
endpoint: str,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Stop the running evacuation process from shard to other shards.
Args:
address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
timeout: Timeout for an operation (default 15s)
Returns:
Command's result.
"""
return self._execute(
"control shards evacuation stop",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def evacuation_status(
self,
endpoint: str,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Get the status of object evacuation from shard to other shards.
Args:
address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
timeout: Timeout for an operation (default 15s)
Returns:
Command's result.
"""
return self._execute(
"control shards evacuation status",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def detach(self, endpoint: str, address: Optional[str] = None, id: Optional[str] = None, timeout: Optional[str] = None) -> CommandResult:
"""
Detach and close the shards
Args:
address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
id: List of shard IDs in base58 encoding
timeout: Timeout for an operation (default 15s)
Returns:
Command's result.
"""
return self._execute(
"control shards detach",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
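Note that set_mode and list branch on `if not wallet_password`, so an empty password string is treated like a missing one and skips the password path entirely. A minimal sketch (endpoint is a placeholder):

shards = FrostfsCliShards(shell, "/usr/bin/frostfs-cli")
shards.set_mode(
    endpoint="s01.frostfs.devenv:8081",
    mode="read-only",
    all=True,  # apply to every shard instead of passing IDs
)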

View file

@ -1,155 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsCliStorageGroup(CliCommand):
def put(
self,
rpc_endpoint: str,
wallet: str,
cid: str,
members: list[str],
ttl: Optional[int] = None,
bearer: Optional[str] = None,
lifetime: Optional[int] = None,
address: Optional[str] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Put storage group to FrostFS.
Args:
address: Address of wallet account.
bearer: File with signed JSON or binary encoded bearer token.
cid: Container ID.
members: ID list of storage group members.
lifetime: Storage group lifetime in epochs.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
ttl: TTL value in request meta header.
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for an operation (default 15s).
Returns:
Command's result.
"""
members = ",".join(members)
return self._execute(
"storagegroup put",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def get(
self,
rpc_endpoint: str,
wallet: str,
cid: str,
id: str,
raw: Optional[bool] = False,
ttl: Optional[int] = None,
bearer: Optional[str] = None,
lifetime: Optional[int] = None,
address: Optional[str] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Get storage group from FrostFS.
Args:
address: Address of wallet account.
bearer: File with signed JSON or binary encoded bearer token.
cid: Container ID.
id: Storage group identifier.
raw: Set raw request option.
lifetime: Storage group lifetime in epochs.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
ttl: TTL value in request meta header.
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for an operation (default 15s).
Returns:
Command's result.
"""
return self._execute(
"storagegroup get",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def list(
self,
rpc_endpoint: str,
wallet: str,
cid: str,
raw: Optional[bool] = False,
ttl: Optional[int] = None,
bearer: Optional[str] = None,
lifetime: Optional[int] = None,
address: Optional[str] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
List storage groups in FrostFS container.
Args:
address: Address of wallet account.
bearer: File with signed JSON or binary encoded bearer token.
cid: Container ID.
raw: Set raw request option.
lifetime: Storage group lifetime in epochs.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
ttl: TTL value in request meta header.
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for an operation (default 15s).
Returns:
Command's result.
"""
return self._execute(
"storagegroup list",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def delete(
self,
rpc_endpoint: str,
wallet: str,
cid: str,
id: str,
raw: Optional[bool] = False,
ttl: Optional[int] = None,
bearer: Optional[str] = None,
lifetime: Optional[int] = None,
address: Optional[str] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""
Delete storage group from FrostFS.
Args:
address: Address of wallet account.
bearer: File with signed JSON or binary encoded bearer token.
cid: Container ID.
id: Storage group identifier.
raw: Set raw request option.
lifetime: Storage group lifetime in epochs.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
ttl: TTL value in request meta header.
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for an operation (default 15s).
Returns:
Command's result.
"""
return self._execute(
"storagegroup delete",
**{param: value for param, value in locals().items() if param not in ["self"]},
)

View file

@ -1,53 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsCliTree(CliCommand):
def healthcheck(
self,
wallet: Optional[str] = None,
rpc_endpoint: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Get internal balance of FrostFS account
Args:
address: Address of wallet account.
owner: Owner of balance account (omit to use owner from private key).
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
wallet: WIF (NEP-2) string or path to the wallet or binary key.
Returns:
Command's result.
"""
return self._execute(
"tree healthcheck",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def list(
self,
cid: str,
rpc_endpoint: Optional[str] = None,
wallet: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Get Tree List
Args:
cid: Container ID.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
wallet: WIF (NEP-2) string or path to the wallet or binary key.
timeout: Timeout for the operation (default 15s).
Returns:
Command's result.
"""
return self._execute(
"tree list",
**{param: value for param, value in locals().items() if param not in ["self"]},
)

View file

@ -1,64 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsCliUtil(CliCommand):
def sign_bearer_token(
self,
from_file: str,
to_file: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
json: Optional[bool] = False,
) -> CommandResult:
"""
Sign bearer token to use it in requests.
Args:
address: Address of wallet account.
from_file: File with JSON or binary encoded bearer token to sign.
to_file: File to dump signed bearer token (default: binary encoded).
json: Dump bearer token in JSON encoding.
wallet: WIF (NEP-2) string or path to the wallet or binary key.
Returns:
Command's result.
"""
return self._execute(
"util sign bearer-token",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def sign_session_token(
self,
from_file: str,
to_file: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
) -> CommandResult:
"""
Sign session token to use it in requests.
Args:
address: Address of wallet account.
from_file: File with JSON encoded session token to sign.
to_file: File to dump signed session token (default: binary encoded).
wallet: WIF (NEP-2) string or path to the wallet or binary key.
Returns:
Command's result.
"""
return self._execute(
"util sign session-token",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def convert_eacl(self, from_file: str, to_file: str, json: Optional[bool] = False, ape: Optional[bool] = False) -> CommandResult:
"""Convert representation of extended ACL table."""
return self._execute(
"util convert eacl",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
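Given the deprecation of set-eacl noted earlier in this diff, a conversion sketch from a legacy eACL table to its APE representation (file paths are placeholders):

util = FrostfsCliUtil(shell, "/usr/bin/frostfs-cli")
util.convert_eacl(
    from_file="/tmp/legacy_eacl.json",
    to_file="/tmp/ape_chain.json",
    ape=True,  # emit the APE representation
)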

View file

@ -1,13 +0,0 @@
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsCliVersion(CliCommand):
def get(self) -> CommandResult:
"""
Application version and FrostFS API compatibility.
Returns:
Command's result.
"""
return self._execute("", version=True)

View file

@ -1,30 +0,0 @@
from typing import Optional
from frostfs_testlib.hosting.interfaces import Host
from frostfs_testlib.shell.interfaces import CommandOptions, Shell
class GenericCli(object):
def __init__(self, cli_name: str, host: Host) -> None:
self.host = host
self.cli_name = cli_name
def __call__(
self,
args: str = "",
pipes: str = "",
shell: Optional[Shell] = None,
options: Optional[CommandOptions] = None,
):
if not shell:
shell = self.host.get_shell()
cli_config = self.host.get_cli_config(self.cli_name, True)
extra_args = ""
exec_path = self.cli_name
if cli_config:
extra_args = " ".join(cli_config.extra_args)
exec_path = cli_config.exec_path
cmd = f"{exec_path} {args} {extra_args} {pipes}"
return shell.exec(cmd, options)
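GenericCli resolves the executable path and extra arguments from the host's CLI config when one is registered, falling back to the bare command name otherwise. A sketch, assuming a Host instance from the hosting layer and that CommandOptions accepts a check flag:

from frostfs_testlib.shell.interfaces import CommandOptions

curl = GenericCli("curl", host)  # host: a configured Host instance (assumption)
result = curl(
    args="-s http://s01.frostfs.devenv:8080/",
    options=CommandOptions(check=False),  # assumed option: do not raise on non-zero exit
)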

View file

@ -1,2 +0,0 @@
from frostfs_testlib.cli.neogo.go import NeoGo
from frostfs_testlib.cli.neogo.network_type import NetworkType

View file

@ -1,134 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class NeoGoCandidate(CliCommand):
def register(
self,
address: str,
rpc_endpoint: str,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
wallet_password: Optional[str] = None,
gas: Optional[float] = None,
timeout: int = 10,
) -> CommandResult:
"""Register as a new candidate.
Args:
address: Address to register.
wallet: Target location of the wallet file ('-' to read from stdin);
conflicts with --wallet-config flag.
wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
wallet_password: Wallet password.
gas: Network fee to add to the transaction (prioritizing it).
rpc_endpoint: RPC node address.
timeout: Timeout for the operation (default: 10s).
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
exec_param = {
param: param_value
for param, param_value in locals().items()
if param not in ["self", "wallet_password"]
}
exec_param["timeout"] = f"{timeout}s"
if wallet_password is not None:
return self._execute_with_password(
"wallet candidate register", wallet_password, **exec_param
)
if wallet_config:
return self._execute("wallet candidate register", **exec_param)
raise Exception(self.WALLET_PASSWD_ERROR_MSG)
def unregister(
self,
address: str,
rpc_endpoint: str,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
wallet_password: Optional[str] = None,
gas: Optional[float] = None,
timeout: int = 10,
) -> CommandResult:
"""Unregister self as a candidate.
Args:
address: Address to unregister.
wallet: Target location of the wallet file ('-' to read from stdin);
conflicts with --wallet-config flag.
wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
wallet_password: Wallet password.
gas: Network fee to add to the transaction (prioritizing it).
rpc_endpoint: RPC node address.
timeout: Timeout for the operation (default: 10s).
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
exec_param = {
param: param_value
for param, param_value in locals().items()
if param not in ["self", "wallet_password"]
}
exec_param["timeout"] = f"{timeout}s"
if wallet_password is not None:
return self._execute_with_password(
"wallet candidate unregister", wallet_password, **exec_param
)
if wallet_config:
return self._execute("wallet candidate unregister", **exec_param)
raise Exception(self.WALLET_PASSWD_ERROR_MSG)
def vote(
self,
address: str,
candidate: str,
rpc_endpoint: str,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
wallet_password: Optional[str] = None,
gas: Optional[float] = None,
timeout: int = 10,
) -> CommandResult:
"""Votes for a validator.
Voting happens by calling the "vote" method of the NEO native contract. Omit the
candidate argument to perform unvoting.
Args:
address: Address to vote from
candidate: Public key of candidate to vote for.
wallet: Target location of the wallet file ('-' to read from stdin);
conflicts with --wallet-config flag.
wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
wallet_password: Wallet password.
gas: Network fee to add to the transaction (prioritizing it).
rpc_endpoint: RPC node address.
timeout: Timeout for the operation (default: 10s).
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
exec_param = {
param: param_value
for param, param_value in locals().items()
if param not in ["self", "wallet_password"]
}
exec_param["timeout"] = f"{timeout}s"
if wallet_password is not None:
return self._execute_with_password(
"wallet candidate vote", wallet_password, **exec_param
)
if wallet_config:
return self._execute("wallet candidate vote", **exec_param)
raise Exception(self.WALLET_PASSWD_ERROR_MSG)
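All three candidate methods enforce exactly one wallet source via bool(wallet) ^ bool(wallet_config) and then choose the execution path from the password. A sketch of the accepted combinations (addresses, endpoints, and paths are placeholders):

candidate = NeoGoCandidate(shell, "/usr/bin/neo-go")
# wallet file plus password: goes through _execute_with_password
candidate.register(
    address="<neo-address>",
    rpc_endpoint="http://morph-chain.frostfs.devenv:30333",
    wallet="/path/to/wallet.json",
    wallet_password="one",
)
# wallet config with an embedded password: plain _execute
candidate.register(
    address="<neo-address>",
    rpc_endpoint="http://morph-chain.frostfs.devenv:30333",
    wallet_config="/path/to/wallet-config.yml",
)
# a wallet file without a password raises WALLET_PASSWD_ERROR_MSG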

View file

@ -1,398 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class NeoGoContract(CliCommand):
def compile(
self,
input_file: str,
out: str,
manifest: str,
config: str,
no_standards: bool = False,
no_events: bool = False,
no_permissions: bool = False,
bindings: Optional[str] = None,
) -> CommandResult:
"""Compile a smart contract to a .nef file.
Args:
input_file: Input file for the smart contract to be compiled.
out: Output of the compiled contract.
manifest: Emit contract manifest (*.manifest.json) file into separate file using
configuration input file (*.yml).
config: Configuration input file (*.yml).
no_standards: Do not check compliance with supported standards.
no_events: Do not check emitted events with the manifest.
no_permissions: Do not check if invoked contracts are allowed in manifest.
bindings: Output file for smart-contract bindings configuration.
Returns:
Command's result.
"""
return self._execute(
"contract compile",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def deploy(
self,
address: str,
input_file: str,
manifest: str,
rpc_endpoint: str,
sysgas: Optional[float] = None,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
wallet_password: Optional[str] = None,
gas: Optional[float] = None,
out: Optional[str] = None,
force: bool = False,
timeout: int = 10,
) -> CommandResult:
"""Deploy a smart contract (.nef with description)
Args:
wallet: Wallet to use to get the key for transaction signing;
conflicts with wallet_config.
wallet_config: Path to wallet config to use to get the key for transaction signing;
conflicts with wallet.
wallet_password: Wallet password.
address: Address to use as transaction signee (and gas source).
gas: Network fee to add to the transaction (prioritizing it).
sysgas: System fee to add to transaction (compensating for execution).
out: File to put JSON transaction to.
force: Do not ask for a confirmation.
rpc_endpoint: RPC node address.
timeout: Timeout for the operation (default: 10s).
input_file: Input file for the smart contract (*.nef).
manifest: Emit contract manifest (*.manifest.json) file into separate file using
configuration input file (*.yml).
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
exec_param = {
param: param_value
for param, param_value in locals().items()
if param not in ["self", "wallet_password"]
}
exec_param["timeout"] = f"{timeout}s"
if wallet_password is not None:
return self._execute_with_password(
"contract deploy",
wallet_password,
**exec_param,
)
if wallet_config:
return self._execute(
"contract deploy",
**exec_param,
)
raise Exception(self.WALLET_PASSWD_ERROR_MSG)
def generate_wrapper(
self,
out: str,
hash: str,
config: Optional[str] = None,
manifest: Optional[str] = None,
) -> CommandResult:
"""Generate wrapper to use in other contracts.
Args:
config: Configuration file to use.
manifest: Read contract manifest (*.manifest.json) file.
out: Output of the compiled contract.
hash: Smart-contract hash.
Returns:
Command's result.
"""
return self._execute(
"contract generate-wrapper",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def invokefunction(
self,
scripthash: str,
address: Optional[str] = None,
wallet: Optional[str] = None,
method: Optional[str] = None,
arguments: Optional[str] = None,
multisig_hash: Optional[str] = None,
wallet_config: Optional[str] = None,
wallet_password: Optional[str] = None,
gas: Optional[float] = None,
sysgas: Optional[float] = None,
out: Optional[str] = None,
force: bool = False,
rpc_endpoint: Optional[str] = None,
timeout: int = 10,
) -> CommandResult:
"""Executes given (as a script hash) deployed script.
Script is executed with the given method, arguments and signers. Sender is included in
the list of signers by default with None witness scope. If you'd like to change default
sender's scope, specify it via signers parameter. See testinvokefunction documentation
for the details about parameters. It differs from testinvokefunction in that this command
sends an invocation transaction to the network.
Args:
scripthash: Function hash.
method: Call method.
arguments: Method arguments.
multisig_hash: Multisig hash.
wallet: Wallet to use to get the key for transaction signing;
conflicts with wallet_config.
wallet_config: Path to wallet config to use to get the key for transaction signing;
conflicts with wallet.
wallet_password: Wallet password.
address: Address to use as transaction signee (and gas source).
gas: Network fee to add to the transaction (prioritizing it).
sysgas: System fee to add to transaction (compensating for execution).
out: File to put JSON transaction to.
force: Force-push the transaction in case of bad VM state after test script invocation.
rpc_endpoint: RPC node address.
timeout: Timeout for the operation (default: 10s).
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
multisig_hash = f"-- {multisig_hash}" or ""
post_data = f"{scripthash} {method or ''} {arguments or ''} {multisig_hash}"
exec_param = {
param: param_value
for param, param_value in locals().items()
if param
not in [
"self",
"scripthash",
"method",
"arguments",
"multisig_hash",
"wallet_password",
]
}
exec_param["timeout"] = f"{timeout}s"
exec_param["post_data"] = post_data
if wallet_password is not None:
return self._execute_with_password(
"contract invokefunction", wallet_password, **exec_param
)
if wallet_config:
return self._execute("contract invokefunction", **exec_param)
raise Exception(self.WALLET_PASSWD_ERROR_MSG)
def testinvokefunction(
self,
scripthash: str,
wallet: Optional[str] = None,
wallet_password: Optional[str] = None,
method: Optional[str] = None,
arguments: Optional[str] = None,
multisig_hash: Optional[str] = None,
rpc_endpoint: Optional[str] = None,
timeout: int = 10,
) -> CommandResult:
"""Executes given (as a script hash) deployed script.
Script is executed with the given method, arguments and signers (sender is not included
by default). If no method is given "" is passed to the script, if no arguments are given,
an empty array is passed, if no signers are given no array is passed. If signers are
specified, the first one of them is treated as a sender. All of the given arguments are
encapsulated into array before invoking the script. The script thus should follow the
regular convention of smart contract arguments (method string and an array of other
arguments).
See more information and samples in `neo-go contract testinvokefunction --help`.
Args:
scripthash: Function hash.
wallet: Wallet to use for testinvoke.
wallet_password: Wallet password.
method: Call method.
arguments: Method arguments.
multisig_hash: Multisig hash.
rpc_endpoint: RPC node address.
timeout: Timeout for the operation (default: 10s).
Returns:
Command's result.
"""
multisig_hash = f"-- {multisig_hash}" if multisig_hash else ""
post_data = f"{scripthash} {method or ''} {arguments or ''} {multisig_hash}"
exec_param = {
param: param_value
for param, param_value in locals().items()
if param
not in [
"self",
"scripthash",
"method",
"arguments",
"multisig_hash",
"wallet_password",
]
}
exec_param["timeout"] = f"{timeout}s"
exec_param["post_data"] = post_data
if wallet_password is not None:
return self._execute_with_password(
"contract testinvokefunction", wallet_password, **exec_param
)
return self._execute("contract testinvokefunction", **exec_param)
def testinvokescript(
self,
input_file: str,
rpc_endpoint: Optional[str] = None,
timeout: int = 10,
) -> CommandResult:
"""Executes given compiled AVM instructions in NEF format.
Instructions are executed with the given set of signers not including sender by default.
See testinvokefunction documentation for the details about parameters.
Args:
input_file: Input location of the .nef file that needs to be invoked.
rpc_endpoint: RPC node address.
timeout: Timeout for the operation (default: 10s).
Returns:
Command's result.
"""
exec_param = {
param: param_value for param, param_value in locals().items() if param not in ["self"]
}
exec_param["timeout"] = f"{timeout}s"
return self._execute(
"contract testinvokescript",
**exec_param,
)
def init(self, name: str, skip_details: bool = False) -> CommandResult:
"""Initialize a new smart-contract in a directory with boiler plate code.
Args:
name: Name of the smart-contract to be initialized.
skip_details: Skip filling in the projects and contract details.
Returns:
Command's result.
"""
return self._execute(
"contract init",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def inspect(
self,
input_file: Optional[str] = None,
compile: Optional[str] = None,
) -> CommandResult:
"""Creates a user readable dump of the program instructions.
Args:
input_file: Input file of the program (either .go or .nef).
compile: Compile input file (it should be go code then).
Returns:
Command's result.
"""
return self._execute(
"contract inspect",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def calc_hash(
self,
input_file: str,
manifest: str,
sender: Optional[str] = None,
) -> CommandResult:
"""Calculates hash of a contract after deployment.
Args:
input_file: Path to NEF file.
sender: Sender script hash or address.
manifest: Path to manifest file.
Returns:
Command's result.
"""
return self._execute(
"contract calc-hash",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def add_group(
self,
manifest: str,
address: str,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
wallet_password: Optional[str] = None,
sender: Optional[str] = None,
nef: Optional[str] = None,
) -> CommandResult:
"""Adds group to the manifest.
Args:
wallet: Wallet to use to get the key for transaction signing;
conflicts with wallet_config.
wallet_config: Path to wallet config to use to get the key for transaction signing;
conflicts with wallet.
wallet_password: Wallet password.
sender: Deploy transaction sender.
address: Account to sign group with.
nef: Path to the NEF file.
manifest: Path to the manifest.
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
exec_param = {
param: param_value
for param, param_value in locals().items()
if param not in ["self", "wallet_password"]
}
if wallet_password is not None:
return self._execute_with_password(
"contract manifest add-group", wallet_password, **exec_param
)
if wallet_config:
return self._execute("contract manifest add-group", **exec_param)
raise Exception(self.WALLET_PASSWD_ERROR_MSG)
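
Taken together, compile and deploy mirror the neo-go contract compile / contract deploy flow: compile emits the .nef and manifest, deploy signs and submits the deployment transaction. A sketch under the same wallet rules, assuming a NeoGoContract instance contract; all file names and the endpoint are illustrative:

contract.compile(
    input_file="mycontract.go",
    out="mycontract.nef",
    manifest="mycontract.manifest.json",
    config="mycontract.yml",
)
contract.deploy(
    address="NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP",
    input_file="mycontract.nef",
    manifest="mycontract.manifest.json",
    rpc_endpoint="http://morph-chain.frostfs.devenv:30333",
    wallet="/path/to/wallet.json",
    wallet_password="",
    force=True,  # skip the interactive confirmation
)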

View file

@ -1,69 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.cli.neogo.network_type import NetworkType
from frostfs_testlib.shell import CommandResult
class NeoGoDb(CliCommand):
def dump(
self,
config_path: str,
out: str,
network: NetworkType = NetworkType.PRIVATE,
count: int = 0,
start: int = 0,
) -> CommandResult:
"""Dump blocks (starting with block #1) to the file.
Args:
config_path: Path to config.
network: Select network type (default: private).
count: Number of blocks to be processed (0 means the whole chain) (default: 0).
start: Block number to start from (default: 0).
out: Output file (stdout if not given).
Returns:
Command's result.
"""
return self._execute(
"db dump",
**{network.value: True},
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def restore(
self,
config_path: str,
input_file: str,
network: NetworkType = NetworkType.PRIVATE,
count: int = 0,
dump: Optional[str] = None,
incremental: bool = False,
) -> CommandResult:
"""Dump blocks (starting with block #1) to the file.
Args:
config_path: Path to config.
network: Select network type (default: private).
count: Number of blocks to be processed (0 means the whole chain) (default: 0).
input_file: Input file (stdin if not given).
dump: Directory for storing JSON dumps.
incremental: Use if dump is incremental.
Returns:
Command's result.
"""
return self._execute(
"db restore",
**{network.value: True},
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
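
A dump produced by db dump can be fed back through db restore, which is the usual way to move a private-chain snapshot between environments. A sketch with illustrative paths, assuming a NeoGoDb instance db:

from frostfs_testlib.cli.neogo.network_type import NetworkType

db.dump(config_path="./config", out="chain.acc", network=NetworkType.PRIVATE)
db.restore(config_path="./config", input_file="chain.acc", network=NetworkType.PRIVATE)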

View file

@ -1,37 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.neogo.candidate import NeoGoCandidate
from frostfs_testlib.cli.neogo.contract import NeoGoContract
from frostfs_testlib.cli.neogo.db import NeoGoDb
from frostfs_testlib.cli.neogo.nep17 import NeoGoNep17
from frostfs_testlib.cli.neogo.node import NeoGoNode
from frostfs_testlib.cli.neogo.query import NeoGoQuery
from frostfs_testlib.cli.neogo.version import NeoGoVersion
from frostfs_testlib.cli.neogo.wallet import NeoGoWallet
from frostfs_testlib.shell import Shell
class NeoGo:
candidate: NeoGoCandidate
contract: NeoGoContract
db: NeoGoDb
nep17: NeoGoNep17
node: NeoGoNode
query: NeoGoQuery
version: NeoGoVersion
wallet: NeoGoWallet
def __init__(
self,
shell: Shell,
neo_go_exec_path: str,
config_path: Optional[str] = None,
):
self.candidate = NeoGoCandidate(shell, neo_go_exec_path, config_path=config_path)
self.contract = NeoGoContract(shell, neo_go_exec_path, config_path=config_path)
self.db = NeoGoDb(shell, neo_go_exec_path, config_path=config_path)
self.nep17 = NeoGoNep17(shell, neo_go_exec_path, config_path=config_path)
self.node = NeoGoNode(shell, neo_go_exec_path, config_path=config_path)
self.query = NeoGoQuery(shell, neo_go_exec_path, config_path=config_path)
self.version = NeoGoVersion(shell, neo_go_exec_path, config_path=config_path)
self.wallet = NeoGoWallet(shell, neo_go_exec_path, config_path=config_path)
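
The facade does nothing beyond wiring one shell and one binary path into every command group, so a single instance exposes the whole CLI surface. A minimal sketch; it assumes LocalShell is exported from frostfs_testlib.shell alongside Shell, and the binary path and endpoint are illustrative:

from frostfs_testlib.shell import LocalShell  # assumption: exported by the shell package

neo_go = NeoGo(LocalShell(), neo_go_exec_path="/usr/bin/neo-go")
result = neo_go.query.height(rpc_endpoint="http://morph-chain.frostfs.devenv:30333")
print(result.stdout)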

View file

@ -1,240 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class NeoGoNep17(CliCommand):
def balance(
self,
address: str,
token: str,
rpc_endpoint: str,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
timeout: int = 10,
) -> CommandResult:
"""Get address balance.
Args:
address: Address to use.
wallet: Target location of the wallet file ('-' to read from stdin);
conflicts with --wallet-config flag.
wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
token: Token to use (hash or name (for NEO/GAS or imported tokens)).
rpc_endpoint: RPC node address.
timeout: Timeout for the operation (default: 10s).
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
exec_param = {
param: param_value for param, param_value in locals().items() if param not in ["self"]
}
exec_param["timeout"] = f"{timeout}s"
return self._execute(
"wallet nep17 balance",
**exec_param,
)
def import_token(
self,
address: str,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
token: Optional[str] = None,
rpc_endpoint: Optional[str] = None,
timeout: int = 10,
) -> CommandResult:
"""Import NEP-17 token to a wallet.
Args:
address: Token contract address or hash in LE.
wallet: Target location of the wallet file ('-' to read from stdin);
conflicts with --wallet-config flag.
wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
token: Token to use (hash or name (for NEO/GAS or imported tokens)).
rpc_endpoint: RPC node address.
timeout: Timeout for the operation (default: 10s).
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
exec_param = {
param: param_value for param, param_value in locals().items() if param not in ["self"]
}
exec_param["timeout"] = f"{timeout}s"
return self._execute(
"wallet nep17 import",
**exec_param,
)
def info(
self,
token: str,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
) -> CommandResult:
"""Print imported NEP-17 token info.
Args:
wallet: Target location of the wallet file ('-' to read from stdin);
conflicts with --wallet-config flag.
wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
token: Token to use (hash or name (for NEO/GAS or imported tokens)).
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
return self._execute(
"wallet nep17 info",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def remove(
self,
token: str,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
force: bool = False,
) -> CommandResult:
"""Remove NEP-17 token from the wallet.
Args:
wallet: Target location of the wallet file ('-' to read from stdin);
conflicts with --wallet-config flag.
wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
token: Token to use (hash or name (for NEO/GAS or imported tokens)).
force: Do not ask for a confirmation.
Returns:
Command's result.
"""
return self._execute(
"wallet nep17 remove",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def transfer(
self,
token: str,
to_address: str,
rpc_endpoint: str,
sysgas: Optional[float] = None,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
wallet_password: Optional[str] = None,
out: Optional[str] = None,
from_address: Optional[str] = None,
force: bool = False,
gas: Optional[float] = None,
amount: float = 0,
timeout: int = 10,
) -> CommandResult:
"""Transfers specified NEP-17 token amount.
Transfer is executed with optional 'data' parameter and cosigners list attached to the
transfer. See 'contract testinvokefunction' documentation for the details about 'data'
parameter and cosigners syntax. If no 'data' is given then default nil value will be used.
If no cosigners are given then the sender with CalledByEntry scope will be used as the only
signer.
Args:
wallet: Target location of the wallet file ('-' to read from stdin);
conflicts with --wallet-config flag.
wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
wallet_password: Wallet password.
out: File to put JSON transaction to.
from_address: Address to send an asset from.
to_address: Address to send an asset to.
token: Token to use (hash or name (for NEO/GAS or imported tokens)).
force: Do not ask for a confirmation.
gas: Network fee to add to the transaction (prioritizing it).
sysgas: System fee to add to transaction (compensating for execution).
amount: Amount of asset to send.
rpc_endpoint: RPC node address.
timeout: Timeout for the operation (default: 10s).
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
exec_param = {
param: param_value
for param, param_value in locals().items()
if param not in ["self", "wallet_password"]
}
exec_param["timeout"] = f"{timeout}s"
if wallet_password is not None:
return self._execute_with_password(
"wallet nep17 transfer",
wallet_password,
**exec_param,
)
if wallet_config:
return self._execute(
"wallet nep17 transfer",
**exec_param,
)
raise Exception(self.WALLET_PASSWD_ERROR_MSG)
def multitransfer(
self,
token: str,
to_address: list[str],
sysgas: float,
rpc_endpoint: str,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
out: Optional[str] = None,
from_address: Optional[str] = None,
force: bool = False,
gas: Optional[float] = None,
amount: float = 0,
timeout: int = 10,
) -> CommandResult:
"""Transfer NEP-17 tokens to multiple recipients.
Args:
wallet: Target location of the wallet file ('-' to read from stdin);
conflicts with --wallet-config flag.
wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
out: File to put JSON transaction to.
from_address: Address to send an asset from.
to_address: List of addresses to send an asset to.
token: Token to use (hash or name (for NEO/GAS or imported tokens)).
force: Do not ask for a confirmation.
gas: Network fee to add to the transaction (prioritizing it).
sysgas: System fee to add to transaction (compensating for execution).
amount: Amount of asset to send.
rpc_endpoint: RPC node address.
timeout: Timeout for the operation (default: 10s).
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
exec_param = {
param: param_value for param, param_value in locals().items() if param not in ["self"]
}
exec_param["timeout"] = f"{timeout}s"
return self._execute(
"wallet nep17 multitransfer",
**exec_param,
)
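
The NEP-17 group follows the same wallet rules: balance is read-only and needs no password, while transfer signs a transaction and therefore needs either a password or a wallet config. A sketch with illustrative values, assuming a NeoGoNep17 instance nep17:

nep17.balance(
    address="NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP",
    token="GAS",
    rpc_endpoint="http://morph-chain.frostfs.devenv:30333",
    wallet="/path/to/wallet.json",
)
nep17.transfer(
    token="GAS",
    to_address="NVTiuSb7NKUvyaSfRcgNegrbwYsHH1cyPk",
    rpc_endpoint="http://morph-chain.frostfs.devenv:30333",
    wallet="/path/to/wallet.json",
    wallet_password="",
    amount=1.5,
    force=True,
)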

View file

@ -1,7 +0,0 @@
from enum import Enum
class NetworkType(Enum):
PRIVATE = "privnet"
MAIN = "mainnet"
TEST = "testnet"

View file

@ -1,16 +0,0 @@
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.cli.neogo.network_type import NetworkType
from frostfs_testlib.shell import CommandResult
class NeoGoNode(CliCommand):
def start(self, network: NetworkType = NetworkType.PRIVATE) -> CommandResult:
"""Start a NEO node.
Args:
network: Select network type (default: private).
Returns:
Command's result.
"""
return self._execute("start", **{network.value: True})

View file

@ -1,100 +0,0 @@
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class NeoGoQuery(CliCommand):
def candidates(self, rpc_endpoint: str, timeout: str = "10s") -> CommandResult:
"""Get candidates and votes.
Args:
rpc_endpoint: RPC node address.
timeout: Timeout for the operation (default: 10s).
Returns:
Command's result.
"""
return self._execute(
"query candidates",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def committee(self, rpc_endpoint: str, timeout: str = "10s") -> CommandResult:
"""Get committee list.
Args:
rpc_endpoint: RPC node address.
timeout: Timeout for the operation (default: 10s).
Returns:
Command's result.
"""
return self._execute(
"query committee",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def height(self, rpc_endpoint: str, timeout: str = "10s") -> CommandResult:
"""Get node height.
Args:
rpc_endpoint: RPC node address.
timeout: Timeout for the operation (default: 10s).
Returns:
Command's result.
"""
return self._execute(
"query height",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def tx(self, tx_hash: str, rpc_endpoint: str, timeout: str = "10s") -> CommandResult:
"""Query transaction status.
Args:
tx_hash: Hash of transaction.
rpc_endpoint: RPC node address.
timeout: Timeout for the operation (default: 10s).
Returns:
Command's result.
"""
return self._execute(
f"query tx {tx_hash}",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self", "tx_hash"]
},
)
def voter(self, rpc_endpoint: str, timeout: str = "10s") -> CommandResult:
"""Print NEO holder account state.
Args:
rpc_endpoint: RPC node address.
timeout: Timeout for the operation (default: 10s).
Returns:
Command's result.
"""
return self._execute(
"query voter",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
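
Query commands are read-only and take no wallet at all, only an RPC endpoint. A sketch, assuming a NeoGoQuery instance query; the endpoint and transaction hash are illustrative:

query.height(rpc_endpoint="http://morph-chain.frostfs.devenv:30333")
query.tx(
    tx_hash="0xd2a43f68d0f8a9d4f0a1b2c3d4e5f60718293a4b5c6d7e8f9011223344556677",
    rpc_endpoint="http://morph-chain.frostfs.devenv:30333",
)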

View file

@ -1,12 +0,0 @@
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class NeoGoVersion(CliCommand):
def get(self) -> CommandResult:
"""Application version.
Returns:
Command's result.
"""
return self._execute("", version=True)

View file

@ -1,381 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class NeoGoWallet(CliCommand):
def claim(
self,
address: str,
rpc_endpoint: str,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
timeout: int = 10,
) -> CommandResult:
"""Claim GAS.
Args:
address: Address to claim GAS for.
wallet: Target location of the wallet file ('-' to read from stdin);
conflicts with --wallet-config flag.
wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
rpc_endpoint: RPC node address.
timeout: Timeout for the operation (default: 10s).
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
exec_param = {
param: param_value for param, param_value in locals().items() if param not in ["self"]
}
exec_param["timeout"] = f"{timeout}s"
return self._execute(
"wallet claim",
**exec_param,
)
def init(
self,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
account: bool = False,
) -> CommandResult:
"""Create a new wallet.
Args:
wallet: Target location of the wallet file ('-' to read from stdin);
conflicts with --wallet-config flag.
wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
account: Create a new account.
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
return self._execute(
"wallet init",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def convert(
self,
out: str,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
) -> CommandResult:
"""Convert addresses from existing NEO2 NEP6-wallet to NEO3 format.
Args:
wallet: Target location of the wallet file ('-' to read from stdin);
conflicts with --wallet-config flag.
wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
out: Where to write converted wallet.
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
return self._execute(
"wallet convert",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def create(
self,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
) -> CommandResult:
"""Add an account to the existing wallet.
Args:
wallet: Target location of the wallet file ('-' to read from stdin);
conflicts with --wallet-config flag.
wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
return self._execute(
"wallet create",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def dump(
self,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
decrypt: bool = False,
) -> CommandResult:
"""Check and dump an existing NEO wallet.
Args:
wallet: Target location of the wallet file ('-' to read from stdin);
conflicts with --wallet-config flag.
wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
decrypt: Decrypt encrypted keys.
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
return self._execute(
"wallet dump",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def dump_keys(
self,
address: Optional[str] = None,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
) -> CommandResult:
"""Check and dump an existing NEO wallet.
Args:
wallet: Target location of the wallet file ('-' to read from stdin);
conflicts with --wallet-config flag.
wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
address: Address to print public keys for.
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
return self._execute(
"wallet dump-keys",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def export(
self,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
decrypt: bool = False,
) -> CommandResult:
"""Export keys for address.
Args:
wallet: Target location of the wallet file ('-' to read from stdin);
conflicts with --wallet-config flag.
wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
decrypt: Decrypt encrypted keys.
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
return self._execute(
"wallet export",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def import_wif(
self,
wif: str,
name: str,
contract: str,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
) -> CommandResult:
"""Import WIF of a standard signature contract.
Args:
wallet: Target location of the wallet file ('-' to read from stdin);
conflicts with --wallet-config flag.
wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
wif: WIF to import.
name: Optional account name.
contract: Verification script for custom contracts.
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
return self._execute(
"wallet import",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def import_multisig(
self,
wif: str,
name: Optional[str] = None,
min_number: int = 0,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
) -> CommandResult:
"""Import multisig contract.
Args:
wallet: Target location of the wallet file ('-' to read from stdin);
conflicts with --wallet-config flag.
wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
wif: WIF to import.
name: Optional account name.
min_number: Minimal number of signatures (default: 0).
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
return self._execute(
"wallet import-multisig",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def import_deployed(
self,
wif: str,
rpc_endpoint: str,
name: Optional[str] = None,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
contract: Optional[str] = None,
timeout: int = 10,
) -> CommandResult:
"""Import deployed contract.
Args:
wallet: Target location of the wallet file ('-' to read from stdin);
conflicts with --wallet-config flag.
wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
wif: WIF to import.
name: Optional account name.
contract: Contract hash or address.
rpc_endpoint: RPC node address.
timeout: Timeout for the operation (default: 10s).
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
exec_param = {
param: param_value for param, param_value in locals().items() if param not in ["self"]
}
exec_param["timeout"] = f"{timeout}s"
return self._execute(
"wallet import-deployed",
**exec_param,
)
def remove(
self,
address: str,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
force: bool = False,
) -> CommandResult:
"""Remove an account from the wallet.
Args:
wallet: Target location of the wallet file ('-' to read from stdin);
conflicts with --wallet-config flag.
wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
address: Account address or hash in LE form to be removed.
force: Do not ask for a confirmation.
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
return self._execute(
"wallet remove",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self"]
},
)
def sign(
self,
input_file: str,
address: str,
rpc_endpoint: Optional[str] = None,
wallet: Optional[str] = None,
wallet_config: Optional[str] = None,
wallet_password: Optional[str] = None,
out: Optional[str] = None,
timeout: int = 10,
) -> CommandResult:
"""Cosign transaction with multisig/contract/additional account.
Signs the given (in the input file) context (which must be a transaction signing context)
for the given address using the given wallet. This command can output the resulting JSON
(with additional signature added) right to the console (if no output file and no RPC
endpoint specified) or into a file (which can be the same as input one). If an RPC endpoint
is given it'll also try to construct a complete transaction and send it via RPC (printing
its hash if everything is OK).
Args:
wallet: Target location of the wallet file ('-' to read from stdin);
conflicts with --wallet-config flag.
wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
wallet_password: Wallet password.
out: File to put JSON transaction to.
input_file: File with JSON transaction.
address: Address to use.
rpc_endpoint: RPC node address.
timeout: Timeout for the operation (default: 10s).
Returns:
Command's result.
"""
assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
exec_param = {
param: param_value
for param, param_value in locals().items()
if param not in ["self", "wallet_password"]
}
exec_param["timeout"] = f"{timeout}s"
if wallet_password is not None:
return self._execute_with_password("wallet sign", wallet_password, **exec_param)
if wallet_config:
return self._execute("wallet sign", **exec_param)
raise Exception(self.WALLET_PASSWD_ERROR_MSG)
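
A typical lifecycle with this wrapper: initialize a wallet file with one account, then claim accumulated GAS for an address in it. A sketch with illustrative paths, assuming a NeoGoWallet instance wallet_cmd:

wallet_cmd.init(wallet="/tmp/wallet.json", account=True)
wallet_cmd.claim(
    address="NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP",
    rpc_endpoint="http://morph-chain.frostfs.devenv:30333",
    wallet="/tmp/wallet.json",
)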

View file

@ -1,102 +0,0 @@
import re
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeInfo, NodeNetInfo, NodeNetmapInfo, NodeStatus
class NetmapParser:
@staticmethod
def netinfo(output: str) -> NodeNetInfo:
regexes = {
"epoch": r"Epoch: (?P<epoch>\d+)",
"network_magic": r"Network magic: (?P<network_magic>.*$)",
"time_per_block": r"Time per block: (?P<time_per_block>\d+\w+)",
"container_fee": r"Container fee: (?P<container_fee>\d+)",
"epoch_duration": r"Epoch duration: (?P<epoch_duration>\d+)",
"inner_ring_candidate_fee": r"Inner Ring candidate fee: (?P<inner_ring_candidate_fee>\d+)",
"maximum_object_size": r"Maximum object size: (?P<maximum_object_size>\d+)",
"maximum_count_of_data_shards": r"Maximum count of data shards: (?P<maximum_count_of_data_shards>\d+)",
"maximum_count_of_parity_shards": r"Maximum count of parity shards: (?P<maximum_count_of_parity_shards>\d+)",
"withdrawal_fee": r"Withdrawal fee: (?P<withdrawal_fee>\d+)",
"homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?P<homomorphic_hashing_disabled>true|false)",
"maintenance_mode_allowed": r"Maintenance mode allowed: (?P<maintenance_mode_allowed>true|false)",
}
parse_result = {}
for key, regex in regexes.items():
search_result = re.search(regex, output, flags=re.MULTILINE)
if search_result is None:
parse_result[key] = None
continue
parse_result[key] = search_result[key].strip()
node_netinfo = NodeNetInfo(**parse_result)
return node_netinfo
@staticmethod
def snapshot_all_nodes(output: str) -> list[NodeNetmapInfo]:
"""The code will parse each line and return each node as dataclass."""
netmap_nodes = output.split("Node ")[1:]
dataclasses_netmap = []
result_netmap = {}
regexes = {
"node_id": r"\d+: (?P<node_id>\w+)",
"node_data_ips": r"(?P<node_data_ips>/ip4/.+?)$",
"node_status": r"(?P<node_status>ONLINE|MAINTENANCE|OFFLINE)",
"cluster_name": r"ClusterName: (?P<cluster_name>\w+)",
"continent": r"Continent: (?P<continent>\w+)",
"country": r"Country: (?P<country>\w+)",
"country_code": r"CountryCode: (?P<country_code>\w+)",
"external_address": r"ExternalAddr: (?P<external_address>/ip[4].+?)$",
"location": r"Location: (?P<location>\w+.*)",
"node": r"Node: (?P<node>\d+\.\d+\.\d+\.\d+)",
"price": r"Price: (?P<price>\d+)",
"sub_div": r"SubDiv: (?P<sub_div>.*)",
"sub_div_code": r"SubDivCode: (?P<sub_div_code>\w+)",
"un_locode": r"UN-LOCODE: (?P<un_locode>\w+.*)",
"role": r"role: (?P<role>\w+)",
}
for node in netmap_nodes:
for key, regex in regexes.items():
search_result = re.search(regex, node, flags=re.MULTILINE)
if search_result is None:
result_netmap[key] = None
continue
if key == "node_data_ips":
result_netmap[key] = search_result[key].strip().split(" ")
continue
if key == "external_address":
result_netmap[key] = search_result[key].strip().split(",")
continue
if key == "node_status":
result_netmap[key] = NodeStatus(search_result[key].strip().lower())
continue
result_netmap[key] = search_result[key].strip()
dataclasses_netmap.append(NodeNetmapInfo(**result_netmap))
return dataclasses_netmap
@staticmethod
def snapshot_one_node(output: str, rpc_endpoint: str) -> NodeNetmapInfo | None:
snapshot_nodes = NetmapParser.snapshot_all_nodes(output=output)
for snapshot in snapshot_nodes:
for endpoint in snapshot.external_address:
if rpc_endpoint.split(":")[0] in endpoint:
return snapshot
@staticmethod
def node_info(output: dict) -> NodeInfo:
data_dict = {"attributes": {}}
for key, value in output.items():
if key != "attributes":
data_dict[key] = value
for attribute in output["attributes"]:
data_dict["attributes"][attribute["key"]] = attribute["value"]
return NodeInfo(**data_dict)
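
Because the parser is purely regex-driven, it can be exercised against a text fragment without a live cluster. A minimal sketch; the sample output is illustrative, and the field names on NodeNetInfo are assumed to match the regex keys above:

sample = (
    "Epoch: 42\n"
    "Network magic: 123456\n"
    "Maximum object size: 67108864\n"
    "Homomorphic hashing disabled: false\n"
    "Maintenance mode allowed: true\n"
)
info = NetmapParser.netinfo(sample)
print(info.epoch, info.maximum_object_size)  # keys absent from the text stay None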

View file

@ -1,5 +0,0 @@
from frostfs_testlib.clients.http.http_client import HttpClient
from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient
from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper
from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper
from frostfs_testlib.clients.s3.s3_http_client import S3HttpClient

View file

@ -1 +0,0 @@
from frostfs_testlib.clients.http.http_client import HttpClient

View file

@ -1,152 +0,0 @@
import io
import json
import logging
import logging.config
from typing import Mapping, Optional, Sequence
import httpx
from frostfs_testlib import reporter
timeout = httpx.Timeout(60, read=150)
LOGGING_CONFIG = {
"disable_existing_loggers": False,
"version": 1,
"handlers": {"default": {"class": "logging.StreamHandler", "formatter": "http", "stream": "ext://sys.stderr"}},
"formatters": {
"http": {
"format": "%(asctime)s [%(levelname)s] %(name)s - %(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S",
}
},
"loggers": {
"httpx": {
"handlers": ["default"],
"level": "ERROR",
},
"httpcore": {
"handlers": ["default"],
"level": "ERROR",
},
},
}
logging.config.dictConfig(LOGGING_CONFIG)
logger = logging.getLogger("NeoLogger")
class HttpClient:
@reporter.step("Send {method} request to {url}")
def send(self, method: str, url: str, expected_status_code: Optional[int] = None, **kwargs: dict) -> httpx.Response:
transport = httpx.HTTPTransport(verify=False, retries=5)
client = httpx.Client(timeout=timeout, transport=transport)
response = client.request(method, url, **kwargs)
self._attach_response(response, **kwargs)
# logger.info(f"Response: {response.status_code} => {response.text}")
if expected_status_code:
assert (
response.status_code == expected_status_code
), f"Got {response.status_code} response code while {expected_status_code} expected"
return response
@classmethod
def _parse_body(cls, readable: httpx.Request | httpx.Response) -> str | None:
try:
content = readable.read()
except Exception as e:
logger.warning(f"Unable to read file: {str(e)}")
return None
if not content:
return None
request_body = None
try:
request_body = json.loads(content)
except (json.JSONDecodeError, UnicodeDecodeError) as e:
logger.warning(f"Unable to convert body to json: {str(e)}")
if request_body is not None:
return json.dumps(request_body, default=str, indent=4)
try:
request_body = content.decode()
except UnicodeDecodeError as e:
logger.warning(f"Unable to decode binary data to text using UTF-8 encoding: {str(e)}")
request_body = content if request_body is None else request_body
request_body = "<large text data>" if len(request_body) > 1000 else request_body
return request_body
@classmethod
def _parse_files(cls, files: Mapping | Sequence | None) -> dict:
filepaths = {}
if not files:
return filepaths
if isinstance(files, Sequence):
items = files
elif isinstance(files, Mapping):
items = files.items()
else:
raise TypeError(f"'files' must be either Sequence or Mapping, got: {type(files).__name__}")
for name, file in items:
if isinstance(file, io.IOBase):
filepaths[name] = file.name
elif isinstance(file, Sequence):
filepaths[name] = file[1].name
return filepaths
@classmethod
def _attach_response(cls, response: httpx.Response, **kwargs):
request = response.request
request_headers = json.dumps(dict(request.headers), default=str, indent=4)
request_body = cls._parse_body(request)
files = kwargs.get("files")
request_files = cls._parse_files(files)
response_headers = json.dumps(dict(response.headers), default=str, indent=4)
response_body = cls._parse_body(response)
report = (
f"Method: {request.method}\n\n"
+ f"URL: {request.url}\n\n"
+ f"Request Headers: {request_headers}\n\n"
+ (f"Request Body: {request_body}\n\n" if request_body else "")
+ (f"Request Files: {request_files}\n\n" if request_files else "")
+ f"Response Status Code: {response.status_code}\n\n"
+ f"Response Headers: {response_headers}\n\n"
+ (f"Response Body: {response_body}\n\n" if response_body else "")
)
curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body, request_files)
reporter.attach(report, "Requests Info")
reporter.attach(curl_request, "CURL")
cls._write_log(curl_request, response_body, response.status_code)
@classmethod
def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict) -> str:
excluded_headers = {"Accept-Encoding", "Connection", "User-Agent", "Content-Length"}
headers = " ".join(f"-H '{header.title()}: {value}'" for header, value in headers.items() if header.title() not in excluded_headers)
data = f" -d '{data}'" if data else ""
for name, path in files.items():
data += f' -F "{name}=@{path}"'
# Option -k means no verify SSL
return f"curl {url} -X {method} {headers}{data} -k"
@classmethod
def _write_log(cls, curl: str, res_body: str, res_code: int) -> None:
if res_body:
curl += f"\nResponse: {res_code}\n{res_body}"
logger.info(f"{curl}")
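
HttpClient is a thin httpx wrapper whose main value is reporting: every call attaches a request/response dump and an equivalent curl command to the test report. A minimal sketch; the URL is illustrative:

client = HttpClient()
response = client.send("GET", "http://127.0.0.1:8084/status", expected_status_code=200)
print(response.status_code)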

View file

@ -1,3 +0,0 @@
from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient
from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper
from frostfs_testlib.clients.s3.interfaces import ACL, BucketContainerResolver, S3ClientWrapper, VersioningStatus

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -1,16 +0,0 @@
import re
from frostfs_testlib.cli.generic_cli import GenericCli
from frostfs_testlib.clients.s3 import BucketContainerResolver
from frostfs_testlib.storage.cluster import ClusterNode
class CurlBucketContainerResolver(BucketContainerResolver):
def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str:
curl = GenericCli("curl", node.host)
output = curl(f"-I http://127.0.0.1:8084/{bucket_name}")
pattern = r"X-Container-Id: (\S+)"
cid = re.findall(pattern, output.stdout)
if cid:
return cid[0]
return None
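
The resolver shells out to curl on the node itself and scrapes the X-Container-Id header from a HEAD request to the local HTTP gate. A sketch, assuming cluster_node is an already-configured ClusterNode and the bucket name is illustrative:

resolver = CurlBucketContainerResolver()
cid = resolver.resolve(cluster_node, "my-bucket")
print(cid)  # container ID string, or None if the header was absent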

View file

@ -1,623 +0,0 @@
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Literal, Optional, Union
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.testing.readable import HumanReadableABC, HumanReadableEnum
from frostfs_testlib.utils.file_utils import TestFile
def _make_objs_dict(key_names):
objs_list = []
for key in key_names:
obj_dict = {"Key": key}
objs_list.append(obj_dict)
objs_dict = {"Objects": objs_list}
return objs_dict
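
_make_objs_dict shapes plain key names into the Delete payload structure that the S3 delete-objects call expects; for example:

# _make_objs_dict(["a.txt", "b.txt"]) ->
# {"Objects": [{"Key": "a.txt"}, {"Key": "b.txt"}]}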
class VersioningStatus(HumanReadableEnum):
UNDEFINED = None
ENABLED = "Enabled"
SUSPENDED = "Suspended"
class ACL:
PRIVATE = "private"
PUBLIC_READ = "public-read"
PUBLIC_READ_WRITE = "public-read-write"
AUTHENTICATED_READ = "authenticated-read"
AWS_EXEC_READ = "aws-exec-read"
BUCKET_OWNER_READ = "bucket-owner-read"
BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"
LOG_DELIVERY_WRITE = "log-delivery-write"
class BucketContainerResolver(ABC):
@abstractmethod
def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str:
"""
Resolve Container ID from bucket name
Args:
node: node from where we want to resolve
bucket_name: name of the bucket
**kwargs: any other required params
Returns: Container ID
"""
raise NotImplementedError("Call from abstract class")
class S3ClientWrapper(HumanReadableABC):
access_key_id: str
secret_access_key: str
profile: str
region: str
s3gate_endpoint: str
iam_endpoint: str
@abstractmethod
def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str, region: str) -> None:
pass
@abstractmethod
def set_endpoint(self, s3gate_endpoint: str):
"""Set endpoint"""
@abstractmethod
def set_iam_endpoint(self, iam_endpoint: str):
"""Set iam endpoint"""
@abstractmethod
def create_bucket(
self,
bucket: Optional[str] = None,
object_lock_enabled_for_bucket: Optional[bool] = None,
acl: Optional[str] = None,
grant_write: Optional[str] = None,
grant_read: Optional[str] = None,
grant_full_control: Optional[str] = None,
location_constraint: Optional[str] = None,
) -> str:
"""Create a bucket."""
# BUCKET METHODS #
@abstractmethod
def list_buckets(self) -> list[str]:
"""List buckets."""
@abstractmethod
def delete_bucket(self, bucket: str) -> None:
"""Delete bucket"""
@abstractmethod
def head_bucket(self, bucket: str) -> None:
"""This action is useful to determine if a bucket exists and you have permission to access it.
The action returns a 200 OK if the bucket exists and you have permission to access it.
If the bucket does not exist or you do not have permission to access it, the HEAD request
returns a generic 400 Bad Request, 403 Forbidden or 404 Not Found code.
A message body is not included, so you cannot determine the exception beyond these error codes.
"""
@abstractmethod
def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None:
"""Sets the versioning state of an existing bucket.
You can set the versioning state with one of the following values:
Enabled - Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID.
Suspended - Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null.
If the versioning state has never been set on a bucket, it has no versioning state
"""
@abstractmethod
def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]:
"""Returns the versioning state of a bucket.
To retrieve the versioning state of a bucket, you must be the bucket owner.
"""
@abstractmethod
def put_bucket_tagging(self, bucket: str, tags: list) -> None:
"""Sets the tags for a bucket."""
@abstractmethod
def get_bucket_tagging(self, bucket: str) -> list:
"""Returns the tag set associated with the Outposts bucket."""
@abstractmethod
def delete_bucket_tagging(self, bucket: str) -> None:
"""Deletes the tags from the bucket."""
@abstractmethod
def get_bucket_acl(self, bucket: str) -> dict:
"""This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket."""
@abstractmethod
def put_bucket_acl(
self,
bucket: str,
acl: Optional[str] = None,
grant_write: Optional[str] = None,
grant_read: Optional[str] = None,
) -> list:
"""Sets the permissions on an existing bucket using access control lists (ACL)."""
@abstractmethod
def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict:
"""Places an Object Lock configuration on the specified bucket.
The rule specified in the Object Lock configuration will be applied by
default to every new object placed in the specified bucket."""
@abstractmethod
def get_object_lock_configuration(self, bucket: str) -> dict:
"""Gets the Object Lock configuration for a bucket.
The rule specified in the Object Lock configuration will be applied by
default to every new object placed in the specified bucket."""
@abstractmethod
def get_bucket_policy(self, bucket: str) -> str:
"""Returns the policy of a specified bucket."""
@abstractmethod
def delete_bucket_policy(self, bucket: str) -> str:
"""Deletes the policy of a specified bucket."""
@abstractmethod
def put_bucket_policy(self, bucket: str, policy: dict) -> None:
"""Applies S3 bucket policy to an S3 bucket."""
@abstractmethod
def get_bucket_cors(self, bucket: str) -> dict:
"""Returns the Cross-Origin Resource Sharing (CORS) configuration information set for the bucket."""
@abstractmethod
def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None:
"""Sets the cors configuration for your bucket. If the configuration exists, S3 replaces it."""
@abstractmethod
def delete_bucket_cors(self, bucket: str) -> None:
"""Deletes the cors configuration information set for the bucket."""
@abstractmethod
def get_bucket_location(self, bucket: str) -> str:
"""Returns the LocationConstraint the bucket resides in. You can set the it
using the LocationConstraint request parameter in a CreateBucket request."""
# END OF BUCKET METHODS #
# OBJECT METHODS #
@abstractmethod
def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
"""Returns some or all (up to 1,000) of the objects in a bucket with each request.
You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
A 200 OK response can contain valid or invalid XML. Make sure to design your application
to parse the contents of the response and handle it appropriately.
"""
@abstractmethod
def list_objects(
self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None
) -> Union[dict, list[str]]:
"""Returns some or all (up to 1,000) of the objects in a bucket with each request.
You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
A 200 OK response can contain valid or invalid XML. Make sure to design your application
to parse the contents of the response and handle it appropriately.
"""
@abstractmethod
def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict:
"""Returns metadata about all versions of the objects in a bucket."""
@abstractmethod
def list_delete_markers(self, bucket: str, full_output: bool = False) -> dict:
"""Returns metadata about all delete markers of the objects in a bucket."""
@abstractmethod
def put_object(
self,
bucket: str,
filepath: str,
key: Optional[str] = None,
metadata: Optional[dict] = None,
tagging: Optional[str] = None,
acl: Optional[str] = None,
object_lock_mode: Optional[str] = None,
object_lock_retain_until_date: Optional[datetime] = None,
object_lock_legal_hold_status: Optional[str] = None,
grant_full_control: Optional[str] = None,
grant_read: Optional[str] = None,
) -> str:
"""Adds an object to a bucket."""
@abstractmethod
def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
"""The HEAD action retrieves metadata from an object without returning the object itself.
This action is useful if you're only interested in an object's metadata."""
@abstractmethod
def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
"""Removes the null version (if there is one) of an object and inserts a delete marker,
which becomes the latest version of the object. If there isn't a null version,
S3 does not remove any objects but will still respond that the command was successful."""
@abstractmethod
def delete_objects(self, bucket: str, keys: list[str]) -> dict:
"""This action enables you to delete multiple objects from a bucket
using a single HTTP request. If you know the object keys that
you want to delete, then this action provides a suitable alternative
to sending individual delete requests, reducing per-request overhead.
The request contains a list of up to 1000 keys that you want to delete."""
@abstractmethod
def delete_object_versions(self, bucket: str, object_versions: list) -> dict:
"""Delete object versions"""
@abstractmethod
def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None:
"""Delete object versions without delete markers"""
@abstractmethod
def put_object_acl(
self,
bucket: str,
key: str,
acl: Optional[str] = None,
grant_write: Optional[str] = None,
grant_read: Optional[str] = None,
) -> list:
"""Uses the acl subresource to set the access control
list (ACL) permissions for a new or existing object in an S3 bucket."""
@abstractmethod
def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
"""Returns the access control list (ACL) of an object."""
@abstractmethod
def copy_object(
self,
source_bucket: str,
source_key: str,
bucket: Optional[str] = None,
key: Optional[str] = None,
acl: Optional[str] = None,
metadata_directive: Optional[Literal["COPY", "REPLACE"]] = None,
metadata: Optional[dict] = None,
tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None,
tagging: Optional[str] = None,
) -> str:
"""Creates a copy of an object"""
@abstractmethod
def get_object(
self,
bucket: str,
key: str,
version_id: Optional[str] = None,
object_range: Optional[tuple[int, int]] = None,
full_output: bool = False,
) -> dict | TestFile:
"""Retrieves objects from S3."""
@abstractmethod
def create_multipart_upload(self, bucket: str, key: str) -> str:
"""This action initiates a multipart upload and returns an upload ID.
This upload ID is used to associate all of the parts in the specific multipart upload.
You specify this upload ID in each of your subsequent upload part requests (see UploadPart).
You also include this upload ID in the final request to either complete or abort the multipart upload request."""
@abstractmethod
def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]:
"""This action lists in-progress multipart uploads.
An in-progress multipart upload is a multipart upload that has been initiated
using the Initiate Multipart Upload request, but has not yet been completed or aborted.
This action returns at most 1,000 multipart uploads in the response."""
@abstractmethod
def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None:
"""This action aborts a multipart upload. After a multipart upload is aborted,
no additional parts can be uploaded using that upload ID.
The storage consumed by any previously uploaded parts will be freed.
However, if any part uploads are currently in progress, those part
uploads might or might not succeed. As a result, it might be necessary to
abort a given multipart upload multiple times in order to completely free all storage consumed by all parts."""
@abstractmethod
def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str:
"""Uploads a part in a multipart upload."""
@abstractmethod
def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str:
"""Uploads a part by copying data from an existing object as data source."""
@abstractmethod
def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]:
"""Lists the parts that have been uploaded for a specific multipart upload."""
@abstractmethod
def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict:
"""Completes a multipart upload by assembling previously uploaded parts."""
@abstractmethod
def put_object_retention(
self,
bucket: str,
key: str,
retention: dict,
version_id: Optional[str] = None,
bypass_governance_retention: Optional[bool] = None,
) -> None:
"""Places an Object Retention configuration on an object."""
@abstractmethod
def put_object_legal_hold(
self,
bucket: str,
key: str,
legal_hold_status: Literal["ON", "OFF"],
version_id: Optional[str] = None,
) -> None:
"""Applies a legal hold configuration to the specified object."""
@abstractmethod
def put_object_tagging(self, bucket: str, key: str, tags: list) -> None:
"""Sets the tag-set for an object."""
@abstractmethod
def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
"""Returns the tag-set of an object."""
@abstractmethod
def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None:
"""Removes the entire tag set from the specified object."""
@abstractmethod
def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict:
"""Adds or updates bucket lifecycle configuration"""
@abstractmethod
def get_bucket_lifecycle_configuration(self, bucket: str) -> dict:
"""Gets bucket lifecycle configuration"""
@abstractmethod
def delete_bucket_lifecycle(self, bucket: str) -> dict:
"""Deletes bucket lifecycle"""
@abstractmethod
def get_object_attributes(
self,
bucket: str,
key: str,
attributes: list[str],
version_id: str = "",
max_parts: int = 0,
part_number: int = 0,
full_output: bool = True,
) -> dict:
"""Retrieves all the metadata from an object without returning the object itself."""
@abstractmethod
def sync(
self,
bucket: str,
dir_path: str,
acl: Optional[str] = None,
metadata: Optional[dict] = None,
) -> dict:
"""sync directory TODO: Add proper description"""
@abstractmethod
def cp(
self,
bucket: str,
dir_path: str,
acl: Optional[str] = None,
metadata: Optional[dict] = None,
) -> dict:
"""cp directory TODO: Add proper description"""
@abstractmethod
def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str:
"""Creates presign URL"""
# END OF OBJECT METHODS #
# IAM METHODS #
@abstractmethod
def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict:
"""Adds the specified user to the specified group"""
@abstractmethod
def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict:
"""Attaches the specified managed policy to the specified IAM group"""
@abstractmethod
def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict:
"""Attaches the specified managed policy to the specified user"""
@abstractmethod
def iam_create_access_key(self, user_name: str) -> dict:
"""Creates a new AWS secret access key and access key ID for the specified user"""
@abstractmethod
def iam_create_group(self, group_name: str) -> dict:
"""Creates a new group"""
@abstractmethod
def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict:
"""Creates a new managed policy for your AWS account"""
@abstractmethod
def iam_create_user(self, user_name: str) -> dict:
"""Creates a new IAM user for your AWS account"""
@abstractmethod
def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict:
"""Deletes the access key pair associated with the specified IAM user"""
@abstractmethod
def iam_delete_group(self, group_name: str) -> dict:
"""Deletes the specified IAM group"""
@abstractmethod
def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict:
"""Deletes the specified inline policy that is embedded in the specified IAM group"""
@abstractmethod
def iam_delete_policy(self, policy_arn: str) -> dict:
"""Deletes the specified managed policy"""
@abstractmethod
def iam_delete_user(self, user_name: str) -> dict:
"""Deletes the specified IAM user"""
@abstractmethod
def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict:
"""Deletes the specified inline policy that is embedded in the specified IAM user"""
@abstractmethod
def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict:
"""Removes the specified managed policy from the specified IAM group"""
@abstractmethod
def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict:
"""Removes the specified managed policy from the specified user"""
@abstractmethod
def iam_get_group(self, group_name: str) -> dict:
"""Returns a list of IAM users that are in the specified IAM group"""
@abstractmethod
def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict:
"""Retrieves the specified inline policy document that is embedded in the specified IAM group"""
@abstractmethod
def iam_get_policy(self, policy_arn: str) -> dict:
"""Retrieves information about the specified managed policy"""
@abstractmethod
def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict:
"""Retrieves information about the specified version of the specified managed policy"""
@abstractmethod
def iam_get_user(self, user_name: str) -> dict:
"""Retrieves information about the specified IAM user"""
@abstractmethod
def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict:
"""Retrieves the specified inline policy document that is embedded in the specified IAM user"""
@abstractmethod
def iam_list_access_keys(self, user_name: str) -> dict:
"""Returns information about the access key IDs associated with the specified IAM user"""
@abstractmethod
def iam_list_attached_group_policies(self, group_name: str) -> dict:
"""Lists all managed policies that are attached to the specified IAM group"""
@abstractmethod
def iam_list_attached_user_policies(self, user_name: str) -> dict:
"""Lists all managed policies that are attached to the specified IAM user"""
@abstractmethod
def iam_list_entities_for_policy(self, policy_arn: str) -> dict:
"""Lists all IAM users, groups, and roles that the specified managed policy is attached to"""
@abstractmethod
def iam_list_group_policies(self, group_name: str) -> dict:
"""Lists the names of the inline policies that are embedded in the specified IAM group"""
@abstractmethod
def iam_list_groups(self) -> dict:
"""Lists the IAM groups"""
@abstractmethod
def iam_list_groups_for_user(self, user_name: str) -> dict:
"""Lists the IAM groups that the specified IAM user belongs to"""
@abstractmethod
def iam_list_policies(self) -> dict:
"""Lists all the managed policies that are available in your AWS account"""
@abstractmethod
def iam_list_policy_versions(self, policy_arn: str) -> dict:
"""Lists information about the versions of the specified managed policy"""
@abstractmethod
def iam_list_user_policies(self, user_name: str) -> dict:
"""Lists the names of the inline policies embedded in the specified IAM user"""
@abstractmethod
def iam_list_users(self) -> dict:
"""Lists the IAM users"""
@abstractmethod
def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict:
"""Adds or updates an inline policy document that is embedded in the specified IAM group"""
@abstractmethod
def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict:
"""Adds or updates an inline policy document that is embedded in the specified IAM user"""
@abstractmethod
def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict:
"""Removes the specified user from the specified group"""
@abstractmethod
def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
"""Updates the name and/or the path of the specified IAM group"""
@abstractmethod
def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
"""Updates the name and/or the path of the specified IAM user"""
@abstractmethod
def iam_tag_user(self, user_name: str, tags: list) -> dict:
"""Adds one or more tags to an IAM user"""
@abstractmethod
def iam_list_user_tags(self, user_name: str) -> dict:
"""List tags of IAM user"""
@abstractmethod
def iam_untag_user(self, user_name: str, tag_keys: list) -> dict:
"""Removes the specified tags from the user"""
# MFA methods
@abstractmethod
def iam_create_virtual_mfa_device(
self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None
) -> tuple:
"""Creates a new virtual MFA device"""
@abstractmethod
def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict:
"""Deactivates the specified MFA device and removes it from association with the user name"""
@abstractmethod
def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict:
"""Deletes a virtual MFA device"""
@abstractmethod
def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict:
"""Enables the specified MFA device and associates it with the specified IAM user"""
@abstractmethod
def iam_list_virtual_mfa_devices(self) -> dict:
"""Lists the MFA devices for an IAM user"""
@abstractmethod
def sts_get_session_token(
self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None
) -> tuple:
"""Get session token for user"""


@@ -1,149 +0,0 @@
import hashlib
import logging
import xml.etree.ElementTree as ET
from typing import Optional
import httpx
from botocore.auth import SigV4Auth
from botocore.awsrequest import AWSRequest
from botocore.credentials import Credentials
from frostfs_testlib import reporter
from frostfs_testlib.clients import HttpClient
from frostfs_testlib.utils.file_utils import TestFile
logger = logging.getLogger("NeoLogger")
DEFAULT_TIMEOUT = 60.0
class S3HttpClient:
def __init__(
self, s3gate_endpoint: str, access_key_id: str, secret_access_key: str, profile: str = "default", region: str = "us-east-1"
) -> None:
self.http_client = HttpClient()
self.credentials = Credentials(access_key_id, secret_access_key)
self.profile = profile
self.region = region
        self.iam_endpoint: Optional[str] = None
        self.s3gate_endpoint: Optional[str] = None
        self.service: Optional[str] = None
        self.signature: Optional[SigV4Auth] = None
self.set_endpoint(s3gate_endpoint)
    def _to_s3_header(self, header: str) -> str:
replacement_map = {
"Acl": "ACL",
"_": "-",
}
result = header
if not header.startswith("x_amz"):
result = header.title()
for find, replace in replacement_map.items():
result = result.replace(find, replace)
return result
def _convert_to_s3_headers(self, scope: dict, exclude: list[str] = None):
exclude = ["self", "cls"] if not exclude else exclude + ["self", "cls"]
return {self._to_s3_header(header): value for header, value in scope.items() if header not in exclude and value is not None}
def _create_aws_request(
self, method: str, url: str, headers: dict, content: str | bytes | TestFile = None, params: dict = None
) -> AWSRequest:
data = b""
if content is not None:
if isinstance(content, TestFile):
with open(content, "rb") as io_content:
data = io_content.read()
elif isinstance(content, str):
data = bytes(content, encoding="utf-8")
elif isinstance(content, bytes):
data = content
else:
raise TypeError(f"Content expected as a string, bytes or TestFile object, got: {content}")
headers["X-Amz-Content-SHA256"] = hashlib.sha256(data).hexdigest()
aws_request = AWSRequest(method, url, headers, data, params)
self.signature.add_auth(aws_request)
return aws_request
def _exec_request(
self,
method: str,
url: str,
headers: dict,
content: str | bytes | TestFile = None,
params: dict = None,
timeout: float = DEFAULT_TIMEOUT,
) -> dict:
aws_request = self._create_aws_request(method, url, headers, content, params)
response = self.http_client.send(
aws_request.method,
aws_request.url,
headers=dict(aws_request.headers),
data=aws_request.data,
params=aws_request.params,
timeout=timeout,
)
try:
response.raise_for_status()
except httpx.HTTPStatusError:
raise httpx.HTTPStatusError(response.text, request=response.request, response=response)
        # The PATCH response body is expected to be XML containing LastModified and ETag
        root = ET.fromstring(response.read())
        data = {
            "LastModified": root.findtext(".//LastModified"),
            "ETag": root.findtext(".//ETag"),
        }
if response.headers.get("x-amz-version-id"):
data["VersionId"] = response.headers.get("x-amz-version-id")
return data
@reporter.step("Set endpoint S3 to {s3gate_endpoint}")
def set_endpoint(self, s3gate_endpoint: str):
if self.s3gate_endpoint == s3gate_endpoint:
return
self.s3gate_endpoint = s3gate_endpoint
self.service = "s3"
self.signature = SigV4Auth(self.credentials, self.service, self.region)
@reporter.step("Set endpoint IAM to {iam_endpoint}")
def set_iam_endpoint(self, iam_endpoint: str):
if self.iam_endpoint == iam_endpoint:
return
self.iam_endpoint = iam_endpoint
self.service = "iam"
self.signature = SigV4Auth(self.credentials, self.service, self.region)
@reporter.step("Patch object S3")
def patch_object(
self,
bucket: str,
key: str,
content: str | bytes | TestFile,
content_range: str,
version_id: str = None,
if_match: str = None,
if_unmodified_since: str = None,
x_amz_expected_bucket_owner: str = None,
timeout: float = DEFAULT_TIMEOUT,
) -> dict:
if content_range and not content_range.startswith("bytes"):
content_range = f"bytes {content_range}/*"
url = f"{self.s3gate_endpoint}/{bucket}/{key}"
headers = self._convert_to_s3_headers(locals(), exclude=["bucket", "key", "content", "version_id", "timeout"])
params = {"VersionId": version_id} if version_id is not None else None
return self._exec_request("PATCH", url, headers, content, params, timeout=timeout)


@@ -1,47 +0,0 @@
import re
from typing import Optional
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsAuthmate
from frostfs_testlib.credentials.interfaces import S3Credentials, S3CredentialsProvider, User
from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
from frostfs_testlib.shell import LocalShell
from frostfs_testlib.steps.cli.container import list_containers
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
from frostfs_testlib.utils import string_utils
class AuthmateS3CredentialsProvider(S3CredentialsProvider):
@reporter.step("Init S3 Credentials using Authmate CLI")
def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None) -> S3Credentials:
cluster_nodes: list[ClusterNode] = self.cluster.cluster_nodes
shell = LocalShell()
wallet = user.wallet
endpoint = cluster_node.storage_node.get_rpc_endpoint()
gate_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes]
# unique short bucket name
bucket = string_utils.unique_name("bucket-")
frostfs_authmate: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
issue_secret_output = frostfs_authmate.secret.issue(
wallet=wallet.path,
peer=endpoint,
gate_public_key=gate_public_keys,
wallet_password=wallet.password,
container_policy=location_constraints,
container_friendly_name=bucket,
).stdout
aws_access_key_id = str(re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id"))
aws_secret_access_key = str(
re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output).group("aws_secret_access_key")
)
cid = str(re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group("container_id"))
containers_list = list_containers(wallet, shell, endpoint)
assert cid in containers_list, f"Expected cid {cid} in {containers_list}"
user.s3_credentials = S3Credentials(aws_access_key_id, aws_secret_access_key)
return user.s3_credentials


@@ -1,51 +0,0 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Any, Optional
from frostfs_testlib.plugins import load_plugin
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
@dataclass
class S3Credentials:
access_key: str
secret_key: str
@dataclass
class User:
name: str
attributes: dict[str, Any] = field(default_factory=dict)
wallet: WalletInfo | None = None
s3_credentials: S3Credentials | None = None
class S3CredentialsProvider(ABC):
def __init__(self, cluster: Cluster) -> None:
self.cluster = cluster
@abstractmethod
def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None, **kwargs) -> S3Credentials:
raise NotImplementedError("Directly called abstract class?")
class GrpcCredentialsProvider(ABC):
def __init__(self, cluster: Cluster) -> None:
self.cluster = cluster
@abstractmethod
def provide(self, user: User, cluster_node: ClusterNode, **kwargs) -> WalletInfo:
raise NotImplementedError("Directly called abstract class?")
class CredentialsProvider:
S3: S3CredentialsProvider
GRPC: GrpcCredentialsProvider
def __init__(self, cluster: Cluster) -> None:
config = cluster.cluster_nodes[0].host.config
s3_cls = load_plugin("frostfs.testlib.credentials_providers", config.s3_creds_plugin_name)
self.S3 = s3_cls(cluster)
grpc_cls = load_plugin("frostfs.testlib.credentials_providers", config.grpc_creds_plugin_name)
self.GRPC = grpc_cls(cluster)
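
# Sketch of how the providers above are wired together in tests: the classes
# behind .S3 and .GRPC are resolved from the host config (s3_creds_plugin_name /
# grpc_creds_plugin_name). The user name is a placeholder.
def issue_credentials(cluster: Cluster, user_name: str) -> S3Credentials:
    provider = CredentialsProvider(cluster)
    user = User(name=user_name)
    user.wallet = provider.GRPC.provide(user, cluster.cluster_nodes[0])
    return provider.S3.provide(user, cluster.cluster_nodes[0])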


@@ -1,14 +0,0 @@
from frostfs_testlib import reporter
from frostfs_testlib.credentials.interfaces import GrpcCredentialsProvider, User
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_PASS
from frostfs_testlib.shell.local_shell import LocalShell
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.wallet import WalletFactory, WalletInfo
class WalletFactoryProvider(GrpcCredentialsProvider):
@reporter.step("Init gRPC Credentials using wallet generation")
def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo:
wallet_factory = WalletFactory(ASSETS_DIR, LocalShell())
user.wallet = wallet_factory.create_wallet(file_name=user.name, password=DEFAULT_WALLET_PASS)
return user.wallet


@@ -1,10 +0,0 @@
class Options:
DEFAULT_SHELL_TIMEOUT = 120
@staticmethod
def get_default_shell_timeout():
return Options.DEFAULT_SHELL_TIMEOUT
@staticmethod
def set_default_shell_timeout(value: int):
Options.DEFAULT_SHELL_TIMEOUT = value
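
# Example: raise the default shell timeout once for a slow environment; shells
# created afterwards pick the new value up through get_default_shell_timeout().
Options.set_default_shell_timeout(300)
assert Options.get_default_shell_timeout() == 300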


@@ -1,52 +0,0 @@
import logging
import os
from datetime import datetime
from importlib.metadata import entry_points
import pytest
import yaml
from frostfs_testlib import reporter
from frostfs_testlib.hosting.hosting import Hosting
from frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE
from frostfs_testlib.storage import get_service_registry
@pytest.fixture(scope="session", autouse=True)
def session_start_time():
start_time = datetime.utcnow()
return start_time
@pytest.fixture(scope="session")
def configure_testlib():
reporter.get_reporter().register_handler(reporter.AllureHandler())
reporter.get_reporter().register_handler(reporter.StepsLogger())
logging.getLogger("paramiko").setLevel(logging.INFO)
# Register Services for cluster
registry = get_service_registry()
services = entry_points(group="frostfs.testlib.services")
for svc in services:
registry.register_service(svc.name, svc.load())
@pytest.fixture(scope="session")
def temp_directory(configure_testlib):
with reporter.step("Prepare tmp directory"):
full_path = ASSETS_DIR
if not os.path.exists(full_path):
os.mkdir(full_path)
return full_path
@pytest.fixture(scope="session")
def hosting(configure_testlib) -> Hosting:
with open(HOSTING_CONFIG_FILE, "r") as file:
hosting_config = yaml.full_load(file)
hosting_instance = Hosting()
hosting_instance.configure(hosting_config)
return hosting_instance
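
# Hypothetical test consuming the session fixtures above; the service name
# pattern is an assumption.
def test_hosting_is_configured(hosting: Hosting):
    assert hosting.hosts, "Hosting should contain at least one host"
    assert hosting.find_service_configs(r"frostfs-storage"), "Expected storage services in config"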


@@ -1,109 +0,0 @@
from typing import Callable
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.healthcheck.interfaces import Healthcheck
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
from frostfs_testlib.shell import CommandOptions
from frostfs_testlib.steps.node_management import storage_node_healthcheck
from frostfs_testlib.storage.cluster import ClusterNode, ServiceClass
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.failover_utils import check_services_status
class BasicHealthcheck(Healthcheck):
def _perform(self, cluster_node: ClusterNode, checks: dict[Callable, dict]):
issues: list[str] = []
for check, kwargs in checks.items():
issue = check(cluster_node, **kwargs)
if issue:
issues.append(issue)
assert not issues, "Issues found:\n" + "\n".join(issues)
@wait_for_success(900, 30, title="Wait for full healthcheck for {cluster_node}")
def full_healthcheck(self, cluster_node: ClusterNode):
checks = {
self.storage_healthcheck: {},
self._tree_healthcheck: {},
}
self._perform(cluster_node, checks)
@wait_for_success(900, 30, title="Wait for startup healthcheck on {cluster_node}")
def startup_healthcheck(self, cluster_node: ClusterNode):
checks = {
self.storage_healthcheck: {},
self._tree_healthcheck: {},
}
self._perform(cluster_node, checks)
@wait_for_success(900, 30, title="Wait for storage healthcheck on {cluster_node}")
    def storage_healthcheck(self, cluster_node: ClusterNode) -> None:
checks = {
self._storage_healthcheck: {},
}
self._perform(cluster_node, checks)
@wait_for_success(900, 30, title="Wait for tree healthcheck on {cluster_node}")
    def tree_healthcheck(self, cluster_node: ClusterNode) -> None:
checks = {
self._tree_healthcheck: {},
}
self._perform(cluster_node, checks)
@wait_for_success(120, 5, title="Wait for service healthcheck on {cluster_node}")
def services_healthcheck(self, cluster_node: ClusterNode):
svcs_to_check = cluster_node.services
checks = {
check_services_status: {
"service_list": svcs_to_check,
"expected_status": "active",
},
self._check_services: {"services": svcs_to_check},
}
self._perform(cluster_node, checks)
def _check_services(self, cluster_node: ClusterNode, services: list[ServiceClass]):
for svc in services:
result = svc.service_healthcheck()
            if result is False:
return f"Service {svc.get_service_systemctl_name()} healthcheck failed on node {cluster_node}."
@reporter.step("Storage healthcheck on {cluster_node}")
def _storage_healthcheck(self, cluster_node: ClusterNode) -> str | None:
result = storage_node_healthcheck(cluster_node.storage_node)
self._gather_socket_info(cluster_node)
if result.health_status != "READY" or result.network_status != "ONLINE":
return f"Node {cluster_node} is not healthy. Health={result.health_status}. Network={result.network_status}"
@reporter.step("Tree healthcheck on {cluster_node}")
def _tree_healthcheck(self, cluster_node: ClusterNode) -> str | None:
host = cluster_node.host
service_config = host.get_service_config(cluster_node.storage_node.name)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{cluster_node.storage_node.name}-config.yaml"
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
remote_cli = FrostfsCli(
shell,
host.get_cli_config(FROSTFS_CLI_EXEC).exec_path,
config_file=wallet_config_path,
)
result = remote_cli.tree.healthcheck(rpc_endpoint="127.0.0.1:8080")
if result.return_code != 0:
            return f"Error during tree healthcheck (rc={result.return_code}): {result.stdout}.\nStderr: {result.stderr}"
@reporter.step("Gather socket info for {cluster_node}")
def _gather_socket_info(self, cluster_node: ClusterNode):
cluster_node.host.get_shell().exec("ss -tuln | grep 8080", CommandOptions(check=False))
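
# Sketch: running the composed healthcheck against every node of a cluster.
# `cluster` stands for a configured Cluster instance (a placeholder here).
def check_cluster(cluster) -> None:
    healthcheck = BasicHealthcheck()
    for node in cluster.cluster_nodes:
        # Retried internally by @wait_for_success (up to 900s, every 30s)
        healthcheck.full_healthcheck(node)
        healthcheck.services_healthcheck(node)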


@@ -1,25 +0,0 @@
from abc import ABC, abstractmethod
from frostfs_testlib.storage.cluster import ClusterNode
class Healthcheck(ABC):
@abstractmethod
def full_healthcheck(self, cluster_node: ClusterNode):
"""Perform full healthcheck on the target cluster node"""
@abstractmethod
def startup_healthcheck(self, cluster_node: ClusterNode):
"""Perform healthcheck required on startup of target cluster node"""
@abstractmethod
def storage_healthcheck(self, cluster_node: ClusterNode):
"""Perform storage service healthcheck on target cluster node"""
@abstractmethod
def services_healthcheck(self, cluster_node: ClusterNode):
"""Perform service status check on target cluster node"""
@abstractmethod
def tree_healthcheck(self, cluster_node: ClusterNode):
"""Perform tree healthcheck on target cluster node"""


@@ -1,31 +0,0 @@
import pytest
@pytest.hookimpl(specname="pytest_collection_modifyitems")
def pytest_add_frostfs_marker(items: list[pytest.Item]):
    # Grant the frostfs marker to all tests whose nodeid (the full path of the test)
    # contains "frostfs", excluding:
    # 1. plugins
    # 2. testlib itself
for item in items:
location = item.location[0]
if "frostfs" in location and "plugin" not in location and "testlib" not in location:
item.add_marker("frostfs")
# pytest hook. Do not rename
@pytest.hookimpl(trylast=True)
def pytest_collection_modifyitems(items: list[pytest.Item]):
    # The order of running tests corresponds to the suites
items.sort(key=lambda item: item.location[0])
# Change order of tests based on @pytest.mark.order(<int>) marker
def order(item: pytest.Item) -> int:
order_marker = item.get_closest_marker("order")
if order_marker and (len(order_marker.args) != 1 or not isinstance(order_marker.args[0], int)):
raise RuntimeError("Incorrect usage of pytest.mark.order")
order_value = order_marker.args[0] if order_marker else 0
return order_value
items.sort(key=lambda item: order(item))
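
# Example of the @pytest.mark.order contract enforced above: a single int
# argument; tests without the marker run with the default order 0.
@pytest.mark.order(1)
def test_runs_early():
    pass

@pytest.mark.order(2)
def test_runs_later():
    pass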


@@ -1,3 +0,0 @@
from frostfs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig
from frostfs_testlib.hosting.hosting import Hosting
from frostfs_testlib.hosting.interfaces import Host


@@ -1,77 +0,0 @@
from dataclasses import dataclass, field, fields
from typing import Any
@dataclass
class ParsedAttributes:
"""Base class for data structures representing parsed attributes from configs."""
@classmethod
def parse(cls, attributes: dict[str, Any]):
# Pick attributes supported by the class
        field_names = {f.name for f in fields(cls)}
supported_attributes = {key: value for key, value in attributes.items() if key in field_names}
return cls(**supported_attributes)
@dataclass
class CLIConfig:
"""Describes CLI tool on some host.
Attributes:
name: Name of the tool.
exec_path: Path to executable file of the tool.
attributes: Dict with extra information about the tool.
"""
name: str
exec_path: str
attributes: dict[str, str] = field(default_factory=dict)
extra_args: list[str] = field(default_factory=list)
@dataclass
class ServiceConfig:
"""Describes frostFS service on some host.
Attributes:
name: Name of the service that uniquely identifies it across all hosts.
attributes: Dict with extra information about the service. For example, we can store
name of docker container (or name of systemd service), endpoints, path to wallet,
path to configuration file, etc.
"""
name: str
attributes: dict[str, str] = field(default_factory=dict)
@dataclass
class HostConfig:
"""Describes machine that hosts frostFS services.
    Attributes:
        plugin_name: Name of plugin that should be used to manage the host.
        hostname: Hostname of the machine.
        healthcheck_plugin_name: Name of the plugin for healthcheck operations.
        address: Address of the machine (IP or DNS name).
        s3_creds_plugin_name: Name of the plugin that provides S3 credentials.
        grpc_creds_plugin_name: Name of the plugin that provides gRPC credentials.
        product: Name of the product running on the host.
        services: List of services hosted on the machine.
        clis: List of CLI tools available on the machine.
        attributes: Dict with extra information about the host. For example, we can store
            connection parameters in this dict.
        interfaces: Dict with network interfaces of the machine.
        environment: Dict with environment variables to use on the machine.
    """
plugin_name: str
hostname: str
healthcheck_plugin_name: str
address: str
s3_creds_plugin_name: str = field(default="authmate")
grpc_creds_plugin_name: str = field(default="wallet_factory")
product: str = field(default="frostfs")
services: list[ServiceConfig] = field(default_factory=list)
clis: list[CLIConfig] = field(default_factory=list)
attributes: dict[str, str] = field(default_factory=dict)
interfaces: dict[str, str] = field(default_factory=dict)
environment: dict[str, str] = field(default_factory=dict)
def __post_init__(self) -> None:
self.services = [ServiceConfig(**service) for service in self.services or []]
self.clis = [CLIConfig(**cli) for cli in self.clis or []]
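
# Minimal sketch of ParsedAttributes.parse: keys unknown to the dataclass are
# silently dropped, known ones become fields. Values are illustrative only.
@dataclass
class DemoAttributes(ParsedAttributes):
    sudo_shell: bool = False

attrs = DemoAttributes.parse({"sudo_shell": True, "unrelated_key": "ignored"})
assert attrs.sudo_shell is True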


@@ -1,342 +0,0 @@
import json
import logging
import os
import re
import time
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Optional
import docker
from requests import HTTPError
from frostfs_testlib.hosting.config import ParsedAttributes
from frostfs_testlib.hosting.interfaces import DiskInfo, Host, HostStatus
from frostfs_testlib.shell import LocalShell, Shell, SSHShell
from frostfs_testlib.shell.command_inspectors import SudoInspector
logger = logging.getLogger("frostfs.testlib.hosting")
@dataclass
class HostAttributes(ParsedAttributes):
"""Represents attributes of host where Docker with frostFS runs.
Attributes:
sudo_shell: Specifies whether shell commands should be auto-prefixed with sudo.
docker_endpoint: Protocol, address and port of docker where frostFS runs. Recommended format
is tcp socket (https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-socket-option),
for example: tcp://{address}:2375 (where 2375 is default docker port).
ssh_login: Login for SSH connection to the machine where docker runs.
ssh_password: Password for SSH connection.
ssh_private_key_path: Path to private key for SSH connection.
ssh_private_key_passphrase: Passphrase for the private key.
"""
sudo_shell: bool = False
docker_endpoint: Optional[str] = None
ssh_login: Optional[str] = None
ssh_password: Optional[str] = None
ssh_private_key_path: Optional[str] = None
ssh_private_key_passphrase: Optional[str] = None
@dataclass
class ServiceAttributes(ParsedAttributes):
"""Represents attributes of service running as Docker container.
Attributes:
container_name: Name of Docker container where the service runs.
volume_name: Name of volume where storage node service stores the data.
start_timeout: Timeout (in seconds) for service to start.
stop_timeout: Timeout (in seconds) for service to stop.
"""
container_name: str
volume_name: Optional[str] = None
start_timeout: int = 90
stop_timeout: int = 90
class DockerHost(Host):
"""Manages services hosted in Docker containers running on a local or remote machine."""
def get_shell(self, sudo: bool = False) -> Shell:
host_attributes = HostAttributes.parse(self._config.attributes)
command_inspectors = []
if sudo:
command_inspectors.append(SudoInspector())
if not host_attributes.ssh_login:
# If there is no SSH connection to the host, use local shell
return LocalShell(command_inspectors)
# If there is SSH connection to the host, use SSH shell
return SSHShell(
host=self._config.address,
login=host_attributes.ssh_login,
password=host_attributes.ssh_password,
private_key_path=host_attributes.ssh_private_key_path,
private_key_passphrase=host_attributes.ssh_private_key_passphrase,
command_inspectors=command_inspectors,
)
def start_host(self) -> None:
# We emulate starting machine by starting all services
        # As an alternative we can probably try to start docker service...
for service_config in self._config.services:
self.start_service(service_config.name)
def get_host_status(self) -> HostStatus:
# We emulate host status by checking all services.
for service_config in self._config.services:
state = self._get_container_state(service_config.name)
if state != "running":
return HostStatus.OFFLINE
return HostStatus.ONLINE
    # mode is accepted for compatibility with the Host interface and ignored here
    def stop_host(self, mode: str = "") -> None:
# We emulate stopping machine by stopping all services
# As an alternative we can probably try to stop docker service...
for service_config in self._config.services:
self.stop_service(service_config.name)
def start_service(self, service_name: str) -> None:
service_attributes = self._get_service_attributes(service_name)
client = self._get_docker_client()
client.start(service_attributes.container_name)
self._wait_for_container_to_be_in_state(
container_name=service_attributes.container_name,
expected_state="running",
timeout=service_attributes.start_timeout,
)
def stop_service(self, service_name: str) -> None:
service_attributes = self._get_service_attributes(service_name)
client = self._get_docker_client()
client.stop(service_attributes.container_name)
self._wait_for_container_to_be_in_state(
container_name=service_attributes.container_name,
expected_state="exited",
timeout=service_attributes.stop_timeout,
)
def mask_service(self, service_name: str) -> None:
# Not required for Docker
return
def unmask_service(self, service_name: str) -> None:
# Not required for Docker
return
def wait_success_suspend_process(self, service_name: str):
raise NotImplementedError("Not supported for docker")
def wait_success_resume_process(self, service_name: str):
raise NotImplementedError("Not supported for docker")
def restart_service(self, service_name: str) -> None:
service_attributes = self._get_service_attributes(service_name)
client = self._get_docker_client()
client.restart(service_attributes.container_name)
self._wait_for_container_to_be_in_state(
container_name=service_attributes.container_name,
expected_state="running",
timeout=service_attributes.start_timeout,
)
def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None:
raise NotImplementedError("Not implemented for docker")
def get_data_directory(self, service_name: str) -> str:
service_attributes = self._get_service_attributes(service_name)
client = self._get_docker_client()
volume_info = client.inspect_volume(service_attributes.volume_name)
volume_path = volume_info["Mountpoint"]
return volume_path
def send_signal_to_service(self, service_name: str, signal: str) -> None:
raise NotImplementedError("Not implemented for docker")
def delete_metabase(self, service_name: str) -> None:
raise NotImplementedError("Not implemented for docker")
def delete_write_cache(self, service_name: str) -> None:
raise NotImplementedError("Not implemented for docker")
def delete_fstree(self, service_name: str) -> None:
raise NotImplementedError("Not implemented for docker")
def delete_blobovnicza(self, service_name: str) -> None:
raise NotImplementedError("Not implemented for docker")
def delete_pilorama(self, service_name: str) -> None:
raise NotImplementedError("Not implemented for docker")
def delete_file(self, file_path: str) -> None:
raise NotImplementedError("Not implemented for docker")
    def is_file_exist(self, file_path: str) -> bool:
raise NotImplementedError("Not implemented for docker")
def wipefs_storage_node_data(self, service_name: str) -> None:
raise NotImplementedError("Not implemented for docker")
def finish_wipefs(self, service_name: str) -> None:
raise NotImplementedError("Not implemented for docker")
def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None:
volume_path = self.get_data_directory(service_name)
shell = self.get_shell()
meta_clean_cmd = f"rm -rf {volume_path}/meta*/*"
data_clean_cmd = f"; rm -rf {volume_path}/data*/*" if not cache_only else ""
cmd = f"{meta_clean_cmd}{data_clean_cmd}"
shell.exec(cmd)
def attach_disk(self, device: str, disk_info: DiskInfo) -> None:
raise NotImplementedError("Not supported for docker")
def detach_disk(self, device: str) -> DiskInfo:
raise NotImplementedError("Not supported for docker")
def is_disk_attached(self, device: str, disk_info: DiskInfo) -> bool:
raise NotImplementedError("Not supported for docker")
def dump_logs(
self,
directory_path: str,
since: Optional[datetime] = None,
until: Optional[datetime] = None,
filter_regex: Optional[str] = None,
) -> None:
client = self._get_docker_client()
for service_config in self._config.services:
container_name = self._get_service_attributes(service_config.name).container_name
            try:
                # docker-py returns bytes; decode so regex filtering and text writing work
                logs = client.logs(container_name, since=since, until=until).decode(errors="replace")
            except HTTPError as exc:
                logger.info(f"Got exception while dumping logs of '{container_name}': {exc}")
                continue
            if filter_regex:
                logs = (
                    "\n".join(match[0] for match in re.findall(filter_regex, logs, re.IGNORECASE))
                    or f"No matches found in logs based on given filter '{filter_regex}'"
                )
            # Save logs to the directory
            file_path = os.path.join(
                directory_path,
                f"{self._config.address}-{container_name}-log.txt",
            )
            with open(file_path, "w") as file:
                file.write(logs)
def get_filtered_logs(
self,
filter_regex: str,
since: Optional[datetime] = None,
until: Optional[datetime] = None,
unit: Optional[str] = None,
exclude_filter: Optional[str] = None,
priority: Optional[str] = None,
        word_count: Optional[bool] = None,
) -> str:
        client = self._get_docker_client()
        filtered_logs = ""
        for service_config in self._config.services:
            container_name = self._get_service_attributes(service_config.name).container_name
            try:
                # docker-py returns bytes; decode so regex filtering works on text
                logs = client.logs(container_name, since=since, until=until).decode(errors="replace")
            except HTTPError as exc:
                logger.info(f"Got exception while dumping logs of '{container_name}': {exc}")
                continue
            if exclude_filter:
                logs = logs.replace(exclude_filter, "")
            matches = re.findall(filter_regex, logs, re.IGNORECASE + re.MULTILINE)
            if matches:
                # Accumulate only the matched lines instead of overwriting with raw logs
                filtered_logs += f"{container_name}:\n{os.linesep.join(matches)}"
        return filtered_logs
def is_message_in_logs(
self,
message_regex: str,
since: Optional[datetime] = None,
until: Optional[datetime] = None,
unit: Optional[str] = None,
) -> bool:
client = self._get_docker_client()
for service_config in self._config.services:
container_name = self._get_service_attributes(service_config.name).container_name
try:
                # docker-py returns bytes; decode before regex search
                logs = client.logs(container_name, since=since, until=until).decode(errors="replace")
except HTTPError as exc:
logger.info(f"Got exception while dumping logs of '{container_name}': {exc}")
continue
if message_regex:
matches = re.findall(message_regex, logs, re.IGNORECASE)
if matches:
return True
return False
def _get_service_attributes(self, service_name) -> ServiceAttributes:
service_config = self.get_service_config(service_name)
return ServiceAttributes.parse(service_config.attributes)
def _get_docker_client(self) -> docker.APIClient:
docker_endpoint = HostAttributes.parse(self._config.attributes).docker_endpoint
if not docker_endpoint:
# Use default docker client that talks to unix socket
return docker.APIClient()
# Otherwise use docker client that talks to specified endpoint
return docker.APIClient(base_url=docker_endpoint)
    def _get_container_by_name(self, container_name: str) -> dict[str, Any] | None:
client = self._get_docker_client()
containers = client.containers(all=True)
for container in containers:
# Names in local docker environment are prefixed with /
clean_names = set(name.strip("/") for name in container["Names"])
if container_name in clean_names:
return container
return None
def _wait_for_container_to_be_in_state(self, container_name: str, expected_state: str, timeout: int) -> None:
iterations = 10
iteration_wait_time = timeout / iterations
# To speed things up, we break timeout in smaller iterations and check container state
# several times. This way waiting stops as soon as container reaches the expected state
for _ in range(iterations):
state = self._get_container_state(container_name)
if state == expected_state:
return
time.sleep(iteration_wait_time)
raise RuntimeError(f"Container {container_name} is not in {expected_state} state.")
    def _get_container_state(self, container_name: str) -> str | None:
        container = self._get_container_by_name(container_name)
        logger.debug(f"Current container state:\n{json.dumps(container, indent=2)}")
        if container is None:
            return None
        return container.get("State", None)


@@ -1,107 +0,0 @@
import re
from typing import Any
from frostfs_testlib.hosting.config import HostConfig, ServiceConfig
from frostfs_testlib.hosting.interfaces import Host
from frostfs_testlib.plugins import load_plugin
class Hosting:
"""Hosting manages infrastructure where frostFS runs (machines and frostFS services)."""
_hosts: list[Host]
_host_by_address: dict[str, Host]
_host_by_service_name: dict[str, Host]
@property
def hosts(self) -> list[Host]:
"""Returns all hosts registered in the hosting.
Returns:
List of hosts.
"""
return self._hosts
def configure(self, config: dict[str, Any]) -> None:
"""Configures hosts from specified config.
All existing hosts will be removed from the hosting.
Args:
config: Dictionary with hosting configuration.
"""
hosts = []
host_by_address = {}
host_by_service_name = {}
host_configs = [HostConfig(**host_config) for host_config in config["hosts"]]
for host_config in host_configs:
host_class = load_plugin("frostfs.testlib.hosting", host_config.plugin_name)
host = host_class(host_config)
hosts.append(host)
host_by_address[host_config.address] = host
for service_config in host_config.services:
host_by_service_name[service_config.name] = host
self._hosts = hosts
self._host_by_address = host_by_address
self._host_by_service_name = host_by_service_name
def get_host_by_address(self, host_address: str) -> Host:
"""Returns host with specified address.
Args:
host_address: Address of the host.
Returns:
Host that manages machine with specified address.
"""
host = self._host_by_address.get(host_address)
if host is None:
raise ValueError(f"Unknown host address: '{host_address}'")
return host
def get_host_by_service(self, service_name: str) -> Host:
"""Returns host where service with specified name is located.
Args:
service_name: Name of the service.
Returns:
Host that manages machine where service is located.
"""
host = self._host_by_service_name.get(service_name)
if host is None:
raise ValueError(f"Unknown service name: '{service_name}'")
return host
def get_service_config(self, service_name: str) -> ServiceConfig:
"""Returns config of service with specified name.
Args:
service_name: Name of the service.
Returns:
Config of the service.
"""
host = self.get_host_by_service(service_name)
return host.get_service_config(service_name)
def find_service_configs(self, service_name_pattern: str) -> list[ServiceConfig]:
"""Finds configs of services where service name matches specified regular expression.
Args:
service_name_pattern - regular expression for service names.
Returns:
List of service configs matched with the regular expression.
"""
service_configs = [
service_config
for host in self.hosts
for service_config in host.config.services
if re.match(service_name_pattern, service_config.name)
]
return service_configs
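
# Sketch: loading a hosting config from YAML and resolving hosts; the file path
# and service name are assumptions.
import yaml

with open("hosting_config.yml") as file:
    hosting = Hosting()
    hosting.configure(yaml.safe_load(file))
shell = hosting.get_host_by_service("storage_01").get_shell()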


@@ -1,398 +0,0 @@
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Optional
from frostfs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig
from frostfs_testlib.shell.interfaces import Shell
from frostfs_testlib.testing.readable import HumanReadableEnum
from frostfs_testlib.testing.test_control import retry
class HostStatus(HumanReadableEnum):
ONLINE = "Online"
OFFLINE = "Offline"
UNKNOWN = "Unknown"
class DiskInfo(dict):
"""Dict wrapper for disk_info for disk management commands."""
class Host(ABC):
"""Interface of a host machine where frostFS services are running.
Allows to manage the machine and frostFS services that are hosted on it.
"""
def __init__(self, config: HostConfig) -> None:
self._config = config
self._service_config_by_name = {service_config.name: service_config for service_config in config.services}
self._cli_config_by_name = {cli_config.name: cli_config for cli_config in config.clis}
def __repr__(self) -> str:
return self.config.address
@property
def config(self) -> HostConfig:
"""Returns config of the host.
Returns:
Config of this host.
"""
return self._config
def get_service_config(self, service_name: str) -> ServiceConfig:
"""Returns config of service with specified name.
The service must be hosted on this host.
Args:
service_name: Name of the service.
Returns:
Config of the service.
"""
service_config = self._service_config_by_name.get(service_name)
if service_config is None:
raise ValueError(f"Unknown service name: '{service_name}'")
return service_config
def get_cli_config(self, cli_name: str, allow_empty: bool = False) -> CLIConfig:
"""Returns config of CLI tool with specified name.
The CLI must be located on this host.
Args:
cli_name: Name of the CLI tool.
Returns:
Config of the CLI tool.
"""
cli_config = self._cli_config_by_name.get(cli_name)
if cli_config is None and not allow_empty:
raise ValueError(f"Unknown CLI name: '{cli_name}'")
return cli_config
@abstractmethod
def get_shell(self, sudo: bool = True) -> Shell:
"""Returns shell to this host.
Args:
sudo: if True, run all commands in shell with elevated rights
Returns:
Shell that executes commands on this host.
"""
@abstractmethod
def start_host(self) -> None:
"""Starts the host machine."""
@abstractmethod
def get_host_status(self) -> HostStatus:
"""Check host status."""
@abstractmethod
def stop_host(self, mode: str) -> None:
"""Stops the host machine.
Args:
mode: Specifies mode how host should be stopped. Mode might be host-specific.
"""
@abstractmethod
def start_service(self, service_name: str) -> None:
"""Starts the service with specified name and waits until it starts.
The service must be hosted on this host.
Args:
service_name: Name of the service to start.
"""
@abstractmethod
def stop_service(self, service_name: str) -> None:
"""Stops the service with specified name and waits until it stops.
The service must be hosted on this host.
Args:
service_name: Name of the service to stop.
"""
@abstractmethod
def send_signal_to_service(self, service_name: str, signal: str) -> None:
"""Send signal to service with specified name using kill -<signal>
The service must be hosted on this host.
Args:
service_name: Name of the service to stop.
            signal: signal name. See kill -l for all names
"""
@abstractmethod
def mask_service(self, service_name: str) -> None:
"""Prevent the service from start by any activity by masking it.
The service must be hosted on this host.
Args:
service_name: Name of the service to mask.
"""
@abstractmethod
def unmask_service(self, service_name: str) -> None:
"""Allow the service to start by any activity by unmasking it.
The service must be hosted on this host.
Args:
service_name: Name of the service to unmask.
"""
@abstractmethod
def restart_service(self, service_name: str) -> None:
"""Restarts the service with specified name and waits until it starts.
The service must be hosted on this host.
Args:
service_name: Name of the service to restart.
"""
@abstractmethod
def get_data_directory(self, service_name: str) -> str:
"""
Getting path to data directory on node for further usage
(example: list databases pilorama.db)
Args:
service_name: Name of storage node service.
"""
@abstractmethod
    def wait_success_suspend_process(self, process_name: str) -> None:
        """Waits until the process with the specified name is suspended.
        Args:
            process_name: Name of the process.
        """
    @abstractmethod
    def wait_success_resume_process(self, process_name: str) -> None:
        """Waits until the process with the specified name is resumed.
        Args:
            process_name: Name of the process.
        """
@abstractmethod
def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None:
"""Erases all data of the storage node with specified name.
Args:
service_name: Name of storage node service.
cache_only: To delete cache only.
"""
@abstractmethod
def wipefs_storage_node_data(self, service_name: str) -> None:
"""Erases all data of the storage node with specified name.
Args:
service_name: Name of storage node service.
"""
    @abstractmethod
    def finish_wipefs(self, service_name: str) -> None:
        """Finishes the wipefs procedure for the storage node with specified name.
        Args:
            service_name: Name of storage node service.
        """
@abstractmethod
def delete_fstree(self, service_name: str) -> None:
"""
Deletes all fstrees in the node.
Args:
service_name: Name of storage node service.
"""
@abstractmethod
def delete_metabase(self, service_name: str) -> None:
"""
Deletes all metabase*.db in the node.
Args:
service_name: Name of storage node service.
"""
@abstractmethod
def delete_write_cache(self, service_name: str) -> None:
"""
Deletes all write_cache in the node.
Args:
service_name: Name of storage node service.
"""
@abstractmethod
def delete_blobovnicza(self, service_name: str) -> None:
"""
Deletes all blobovniczas in the node.
Args:
service_name: Name of storage node service.
"""
@abstractmethod
def delete_file(self, file_path: str) -> None:
"""
Deletes file with provided file path
Args:
file_path: full path to the file to delete
"""
@abstractmethod
def is_file_exist(self, file_path: str) -> bool:
"""
Checks if file exist
Args:
file_path: full path to the file to check
"""
@abstractmethod
def detach_disk(self, device: str) -> DiskInfo:
"""Detaches disk device to simulate disk offline/failover scenario.
Args:
device: Device name to detach.
Returns:
internal service disk info related to host plugin (i.e. volume id for cloud devices),
which may be used to identify or re-attach existing volume back.
"""
@abstractmethod
def attach_disk(self, device: str, disk_info: DiskInfo) -> None:
"""Attaches disk device back.
Args:
device: Device name to attach.
            disk_info: any info required for host plugin to identify/attach disk.
"""
@abstractmethod
def is_disk_attached(self, device: str, disk_info: DiskInfo) -> bool:
"""Checks if disk device is attached.
Args:
device: Device name to check.
            disk_info: any info required for host plugin to identify disk.
Returns:
True if attached.
False if detached.
"""
@abstractmethod
def dump_logs(
self,
directory_path: str,
since: Optional[datetime] = None,
until: Optional[datetime] = None,
filter_regex: Optional[str] = None,
) -> None:
"""Dumps logs of all services on the host to specified directory.
Args:
directory_path: Path to the directory where logs should be stored.
since: If set, limits the time from which logs should be collected. Must be in UTC.
until: If set, limits the time until which logs should be collected. Must be in UTC.
filter_regex: regex to filter output
"""
@abstractmethod
def get_filtered_logs(
self,
filter_regex: str,
since: Optional[datetime] = None,
until: Optional[datetime] = None,
unit: Optional[str] = None,
exclude_filter: Optional[str] = None,
priority: Optional[str] = None,
        word_count: Optional[bool] = None,
) -> str:
"""Get logs from host filtered by regex.
Args:
filter_regex: regex filter for logs.
since: If set, limits the time from which logs should be collected. Must be in UTC.
until: If set, limits the time until which logs should be collected. Must be in UTC.
unit: required unit.
priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher.
For example, if we specify the -p 2 option, journalctl will show all messages with levels 2, 1 and 0.
word_count: output type, expected values: lines, bytes, json
Returns:
Found entries as str if any found.
Empty string otherwise.
"""
@abstractmethod
def is_message_in_logs(
self,
message_regex: str,
since: Optional[datetime] = None,
until: Optional[datetime] = None,
unit: Optional[str] = None,
) -> bool:
"""Checks logs on host for specified message regex.
Args:
message_regex: message to find.
since: If set, limits the time from which logs should be collected. Must be in UTC.
until: If set, limits the time until which logs should be collected. Must be in UTC.
Returns:
True if message found in logs in the given time frame.
False otherwise.
"""
@abstractmethod
def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None:
"""
        Waits for service to be in specified state.
        Args:
            systemd_service_name: Service to wait state of.
            expected_state: State to wait for.
            timeout: Seconds to wait.
"""
def down_interface(self, interface: str) -> None:
shell = self.get_shell()
shell.exec(f"ip link set {interface} down")
def up_interface(self, interface: str) -> None:
shell = self.get_shell()
shell.exec(f"ip link set {interface} up")
def check_state(self, interface: str) -> str:
shell = self.get_shell()
return shell.exec(f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'").stdout.strip()
@retry(max_attempts=5, sleep_interval=5, expected_result="UP")
def check_state_up(self, interface: str) -> str:
return self.check_state(interface=interface)
@retry(max_attempts=5, sleep_interval=5, expected_result="DOWN")
def check_state_down(self, interface: str) -> str:
return self.check_state(interface=interface)
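
# Sketch: flapping a network interface on a host and waiting for the state to
# settle via the @retry-decorated helpers above.
def flap_interface(host: Host, interface: str) -> None:
    host.down_interface(interface)
    assert host.check_state_down(interface) == "DOWN"
    host.up_interface(interface)
    assert host.check_state_up(interface) == "UP"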


@@ -1,15 +0,0 @@
from frostfs_testlib.load.interfaces.loader import Loader
from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner
from frostfs_testlib.load.load_config import (
EndpointSelectionStrategy,
K6ProcessAllocationStrategy,
LoadParams,
LoadScenario,
LoadType,
NodesSelectionStrategy,
Preset,
ReadFrom,
)
from frostfs_testlib.load.load_report import LoadReport
from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader
from frostfs_testlib.load.runners import DefaultRunner, LocalRunner, S3LocalRunner


@@ -1,14 +0,0 @@
from abc import ABC, abstractmethod
from frostfs_testlib.shell.interfaces import Shell
class Loader(ABC):
@abstractmethod
def get_shell(self) -> Shell:
"""Get shell for the loader"""
@property
@abstractmethod
def ip(self):
"""Get address of the loader"""


@@ -1,55 +0,0 @@
from abc import ABC, abstractmethod
from frostfs_testlib.load.interfaces.loader import Loader
from frostfs_testlib.load.k6 import K6
from frostfs_testlib.load.load_config import LoadParams
from frostfs_testlib.storage.cluster import ClusterNode
class ScenarioRunner(ABC):
@abstractmethod
def prepare(
self,
load_params: LoadParams,
cluster_nodes: list[ClusterNode],
nodes_under_load: list[ClusterNode],
k6_dir: str,
):
"""Preparation steps before running the load"""
@abstractmethod
def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
"""Init K6 instances"""
@abstractmethod
def get_k6_instances(self) -> list[K6]:
"""Get K6 instances"""
@abstractmethod
def start(self):
"""Start K6 instances"""
@abstractmethod
def stop(self):
"""Stop K6 instances"""
@abstractmethod
def preset(self):
"""Run preset for load"""
@property
@abstractmethod
def is_running(self) -> bool:
"""Returns True if load is running at the moment"""
@abstractmethod
def wait_until_finish(self, soft_timeout: int = 0):
"""Wait until load is finished"""
@abstractmethod
def get_results(self) -> dict:
"""Get results from K6 run"""
@abstractmethod
def get_loaders(self) -> list[Loader]:
"""Return loaders"""


@@ -1,96 +0,0 @@
from dataclasses import dataclass, field
from frostfs_testlib.load.load_config import LoadParams, LoadScenario
from frostfs_testlib.load.load_metrics import get_metrics_object
@dataclass
class SummarizedErrors:
total: int = field(default_factory=int)
percent: float = field(default_factory=float)
threshold: float = field(default_factory=float)
by_node: dict[str, int] = field(default_factory=dict)
def calc_stats(self, operations):
self.total += sum(self.by_node.values())
if not operations:
return
self.percent = self.total / operations * 100
@dataclass
class SummarizedLatencies:
avg: float = field(default_factory=float)
min: float = field(default_factory=float)
max: float = field(default_factory=float)
by_node: dict[str, dict[str, int]] = field(default_factory=dict)
def calc_stats(self):
if not self.by_node:
return
avgs = [lt["avg"] for lt in self.by_node.values()]
self.avg = sum(avgs) / len(avgs)
minimal = [lt["min"] for lt in self.by_node.values()]
self.min = min(minimal)
maximum = [lt["max"] for lt in self.by_node.values()]
self.max = max(maximum)
@dataclass
class SummarizedStats:
threads: int = field(default_factory=int)
requested_rate: int = field(default_factory=int)
operations: int = field(default_factory=int)
rate: float = field(default_factory=float)
throughput: float = field(default_factory=float)
latencies: SummarizedLatencies = field(default_factory=SummarizedLatencies)
    errors: SummarizedErrors = field(default_factory=SummarizedErrors)
total_bytes: int = field(default_factory=int)
passed: bool = True
def calc_stats(self):
self.errors.calc_stats(self.operations)
self.latencies.calc_stats()
self.passed = self.errors.percent <= self.errors.threshold
@staticmethod
def collect(load_params: LoadParams, load_summaries: dict) -> dict[str, "SummarizedStats"]:
if load_params.scenario in [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]:
delete_vus = max(load_params.preallocated_deleters or 0, load_params.max_deleters or 0)
write_vus = max(load_params.preallocated_writers or 0, load_params.max_writers or 0)
read_vus = max(load_params.preallocated_readers or 0, load_params.max_readers or 0)
else:
write_vus = load_params.writers
read_vus = load_params.readers
delete_vus = load_params.deleters
summarized = {
"Write": SummarizedStats(threads=write_vus, requested_rate=load_params.write_rate),
"Read": SummarizedStats(threads=read_vus, requested_rate=load_params.read_rate),
"Delete": SummarizedStats(threads=delete_vus, requested_rate=load_params.delete_rate),
}
for node_key, load_summary in load_summaries.items():
metrics = get_metrics_object(load_params.scenario, load_summary)
for operation in metrics.operations:
target = summarized[operation._NAME]
if not operation.total_iterations:
continue
target.operations += operation.total_iterations
target.rate += operation.rate
target.latencies.by_node[node_key] = operation.latency
target.throughput += operation.throughput
target.errors.threshold = load_params.error_threshold
target.total_bytes += operation.total_bytes
if operation.failed_iterations:
target.errors.by_node[node_key] = operation.failed_iterations
for operation in summarized.values():
operation.calc_stats()
return summarized
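
# Sketch: turning per-node K6 summaries into a pass/fail report. load_summaries
# maps node keys to parsed summary JSONs, as produced by the runners.
def report(load_params: LoadParams, load_summaries: dict) -> None:
    for op_name, stats in SummarizedStats.collect(load_params, load_summaries).items():
        status = "PASS" if stats.passed else "FAIL"
        print(f"{op_name}: ops={stats.operations} errors={stats.errors.percent:.2f}% {status}")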


@@ -1,268 +0,0 @@
import json
import logging
import math
import os
from dataclasses import dataclass
from datetime import datetime
from threading import Event
from time import sleep
from typing import Any
from urllib.parse import urlparse
from frostfs_testlib import reporter
from frostfs_testlib.credentials.interfaces import User
from frostfs_testlib.load.interfaces.loader import Loader
from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType
from frostfs_testlib.processes.remote_process import RemoteProcess
from frostfs_testlib.resources.common import STORAGE_USER_NAME
from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, K6_TEARDOWN_PERIOD
from frostfs_testlib.shell import Shell
from frostfs_testlib.testing.test_control import wait_for_success
EXIT_RESULT_CODE = 0
logger = logging.getLogger("NeoLogger")
@dataclass
class LoadResults:
data_sent: float = 0.0
data_received: float = 0.0
read_ops: float = 0.0
write_ops: float = 0.0
total_ops: float = 0.0
class K6:
_k6_process: RemoteProcess
def __init__(
self,
load_params: LoadParams,
endpoints: list[str],
k6_dir: str,
shell: Shell,
loader: Loader,
user: User,
):
if load_params.scenario is None:
raise RuntimeError("Scenario should not be none")
self.load_params = load_params
self.endpoints = endpoints
self.loader = loader
self.shell = shell
self.user = user
self.preset_output: str = ""
self.summary_json: str = os.path.join(
self.load_params.working_dir,
f"{self.load_params.load_id}_{self.load_params.scenario.value}_summary.json",
)
self._k6_dir: str = k6_dir
command = (
f"{self._generate_env_variables()}{self._k6_dir}/k6 run {self._generate_k6_variables()} "
f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js"
)
remote_user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None
process_id = self.load_params.load_id if self.load_params.scenario != LoadScenario.VERIFY else f"{self.load_params.load_id}_verify"
self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, remote_user, process_id)
def _get_fill_percents(self):
fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs | grep data").stdout.split("\n")
return [line.split() for line in fill_percents][:-1]
def check_fill_percent(self):
fill_percents = self._get_fill_percents()
percent_mean = 0
for line in fill_percents:
percent_mean += float(line[1].split("%")[0])
percent_mean = percent_mean / len(fill_percents)
logger.info(f"{self.loader.ip} mean fill percent is {percent_mean}")
return percent_mean >= self.load_params.fill_percent
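# Illustration (hypothetical df output): a line such as
# "/dev/sdb1 45% /srv/frostfs/data" splits into ["/dev/sdb1", "45%", "/srv/frostfs/data"],
# so two disks at 45% and 55% give a mean of 50.0, and check_fill_percent()
# returns True once load_params.fill_percent <= 50.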
@property
def process_dir(self) -> str:
return self._k6_process.process_dir
def preset(self) -> str:
with reporter.step(f"Run preset on loader {self.loader.ip} for endpoints {self.endpoints}"):
preset_grpc = f"{self._k6_dir}/scenarios/preset/preset_grpc.py"
preset_s3 = f"{self._k6_dir}/scenarios/preset/preset_s3.py"
preset_map = {
LoadType.gRPC: preset_grpc,
LoadType.S3: preset_s3,
LoadType.HTTP: preset_grpc,
}
base_args = {
preset_grpc: [
preset_grpc,
f"--endpoint {','.join(self.endpoints)}",
f"--wallet {self.user.wallet.path} ",
f"--config {self.user.wallet.config_path} ",
],
preset_s3: [
preset_s3,
f"--endpoint {','.join(self.endpoints)}",
],
}
preset_scenario = preset_map[self.load_params.load_type]
command_args = base_args[preset_scenario].copy()
command_args += self.load_params.get_preset_arguments()
command = " ".join(command_args)
result = self.shell.exec(command)
assert result.return_code == EXIT_RESULT_CODE, f"Return code of preset is not zero: {result.stdout}"
self.preset_output = result.stdout.strip("\n")
return self.preset_output
@reporter.step("Generate K6 variables")
def _generate_k6_variables(self) -> str:
env_vars = self.load_params.get_k6_vars()
env_vars[f"{self.load_params.load_type.value.upper()}_ENDPOINTS"] = ",".join(self.endpoints)
env_vars["SUMMARY_JSON"] = self.summary_json
reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables")
return " ".join([f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None])
@reporter.step("Generate env variables")
def _generate_env_variables(self) -> str:
env_vars = self.load_params.get_env_vars()
if not env_vars:
return ""
reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "ENV variables")
return " ".join([f"{param}='{value}'" for param, value in env_vars.items() if value is not None]) + " "
def get_start_time(self) -> datetime:
return datetime.fromtimestamp(self._k6_process.start_time())
def get_end_time(self) -> datetime:
return datetime.fromtimestamp(self._k6_process.end_time())
def start(self) -> None:
with reporter.step(f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"):
self._k6_process.start()
def wait_until_finished(self, event: Event, soft_timeout: int = 0) -> None:
with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"):
if self.load_params.scenario == LoadScenario.VERIFY:
timeout = self.load_params.verify_time or 0
else:
timeout = self.load_params.load_time or 0
start_time = int(self.get_start_time().timestamp())
current_time = int(datetime.utcnow().timestamp())
working_time = current_time - start_time
remaining_time = timeout - working_time
setup_teardown_time = (
int(K6_TEARDOWN_PERIOD) + self.load_params.get_init_time() + int(self.load_params.setup_timeout.replace("s", "").strip())
)
remaining_time_including_setup_and_teardown = remaining_time + setup_teardown_time
timeout = remaining_time_including_setup_and_teardown
if soft_timeout:
timeout = min(timeout, soft_timeout)
original_timeout = timeout
timeouts = {
"K6 start time": start_time,
"Current time": current_time,
"K6 working time": working_time,
"Remaining time for load": remaining_time,
"Setup and teardown": setup_teardown_time,
"Remaining time including setup/teardown": remaining_time_including_setup_and_teardown,
"Soft timeout": soft_timeout,
"Selected timeout": original_timeout,
}
reporter.attach("\n".join([f"{k}: {v}" for k, v in timeouts.items()]), "timeouts.txt")
min_wait_interval = 10
wait_interval = min_wait_interval
if self._k6_process is None:
raise AssertionError("No k6 instances were executed")
while timeout > 0:
if self.load_params.fill_percent is not None:
with reporter.step("Check the fill percentage of all data disks on the node"):
if self.check_fill_percent():
logger.info(f"Stopping load because disks are filled more than {self.load_params.fill_percent}%")
event.set()
self.stop()
return
if event.is_set():
self.stop()
return
if not self._k6_process.running():
return
remaining_time_hours = f"{timeout//3600}h" if timeout // 3600 != 0 else ""
remaining_time_minutes = f"{timeout//60%60}m" if timeout // 60 % 60 != 0 else ""
logger.info(
f"K6 is running. Remaining time {remaining_time_hours}{remaining_time_minutes}{timeout%60}s. Next check after {wait_interval} seconds..."
)
sleep(wait_interval)
timeout -= min(timeout, wait_interval)
wait_interval = max(
min(timeout, int(math.log2(timeout + 1)) * 15) - min_wait_interval,
min_wait_interval,
)
if not self._k6_process.running():
return
self.stop()
if not soft_timeout:
raise TimeoutError(f"Expected K6 to finish after {original_timeout} sec.")
def get_results(self) -> Any:
with reporter.step(f"Get load results from loader {self.loader.ip} on endpoints {self.endpoints}"):
self.__log_output()
if not self.summary_json:
return None
summary_text = self.shell.exec(f"cat {self.summary_json}").stdout
summary_json = json.loads(summary_text)
endpoint = urlparse(self.endpoints[0]).netloc or self.endpoints[0]
allure_filenames = {
K6ProcessAllocationStrategy.PER_LOAD_NODE: f"{self.loader.ip}_{self.load_params.scenario.value}_summary.json",
K6ProcessAllocationStrategy.PER_ENDPOINT: f"{self.loader.ip}_{self.load_params.scenario.value}_{endpoint}_summary.json",
}
allure_filename = allure_filenames[self.load_params.k6_process_allocation_strategy]
reporter.attach(summary_text, allure_filename)
return summary_json
def stop(self) -> None:
with reporter.step(f"Stop load from loader {self.loader.ip} on endpoints {self.endpoints}"):
if self.is_running():
self._k6_process.stop()
self._wait_until_process_end()
def is_running(self) -> bool:
if self._k6_process:
return self._k6_process.running()
return False
@reporter.step("Wait until K6 process end")
@wait_for_success(K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout")
def _wait_until_process_end(self):
return self._k6_process.running()
def __log_output(self) -> None:
reporter.attach(self._k6_process.stdout(full=True), "K6 stdout")
reporter.attach(f"{self._k6_process.process_dir}/stderr", "K6 stderr path")

View file

@ -1,491 +0,0 @@
import math
import os
from dataclasses import dataclass, field, fields, is_dataclass
from enum import Enum
from types import MappingProxyType
from typing import Any, Callable, Optional, get_args
from frostfs_testlib.utils.converting_utils import calc_unit
def convert_time_to_seconds(time: int | str | None) -> int | None:
if time is None:
return None
if str(time).isdigit():
seconds = int(time)
else:
days, hours, minutes = 0, 0, 0
if "d" in time:
days, time = time.split("d")
if "h" in time:
hours, time = time.split("h")
if "min" in time:
minutes = time.replace("min", "")
seconds = int(days) * 86400 + int(hours) * 3600 + int(minutes) * 60
return seconds
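# Worked examples: convert_time_to_seconds("1d2h30min") -> 86400 + 7200 + 1800 = 95400,
# convert_time_to_seconds("300") -> 300, convert_time_to_seconds(None) -> None.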
def force_list(value: str | list[str] | None):
if value is None:
return None
if isinstance(value, list):
return list(map(str.strip, value))
return [value.strip()]
class LoadType(Enum):
gRPC = "grpc"
S3 = "s3"
HTTP = "http"
class LoadScenario(Enum):
gRPC = "grpc"
gRPC_CAR = "grpc_car"
S3 = "s3"
S3_CAR = "s3_car"
S3_MULTIPART = "s3_multipart"
S3_LOCAL = "s3local"
HTTP = "http"
VERIFY = "verify"
LOCAL = "local"
class ReadFrom(Enum):
REGISTRY = "registry"
PRESET = "preset"
MANUAL = "manual"
all_load_scenarios = [
LoadScenario.gRPC,
LoadScenario.S3,
LoadScenario.HTTP,
LoadScenario.S3_CAR,
LoadScenario.gRPC_CAR,
LoadScenario.LOCAL,
LoadScenario.S3_MULTIPART,
LoadScenario.S3_LOCAL,
]
all_scenarios = all_load_scenarios.copy() + [LoadScenario.VERIFY]
constant_vus_scenarios = [
LoadScenario.gRPC,
LoadScenario.S3,
LoadScenario.HTTP,
LoadScenario.LOCAL,
LoadScenario.S3_MULTIPART,
LoadScenario.S3_LOCAL,
]
constant_arrival_rate_scenarios = [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]
grpc_preset_scenarios = [
LoadScenario.gRPC,
LoadScenario.HTTP,
LoadScenario.gRPC_CAR,
LoadScenario.LOCAL,
]
s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART, LoadScenario.S3_LOCAL]
@dataclass
class MetaField:
name: str
metadata: MappingProxyType
value: Any
def metadata_field(
applicable_scenarios: list[LoadScenario],
preset_param: Optional[str] = None,
scenario_variable: Optional[str] = None,
string_repr: Optional[bool] = True,
distributed: Optional[bool] = False,
formatter: Optional[Callable] = None,
env_variable: Optional[str] = None,
):
return field(
default=None,
metadata={
"applicable_scenarios": applicable_scenarios,
"preset_argument": preset_param,
"scenario_variable": scenario_variable,
"string_repr": string_repr,
"distributed": distributed,
"formatter": formatter,
"env_variable": env_variable,
},
)
class NodesSelectionStrategy(Enum):
# Select ONE random node from cluster nodes.
RANDOM_SINGLE = "RANDOM_SINGLE"
# Select ALL nodes.
ALL = "ALL"
# Select ALL nodes except the node under test (useful for failover). This is the DEFAULT.
ALL_EXCEPT_UNDER_TEST = "ALL_EXCEPT_UNDER_TEST"
# Select ONE random node except the node under test (useful for failover).
RANDOM_SINGLE_EXCEPT_UNDER_TEST = "RANDOM_SINGLE_EXCEPT_UNDER_TEST"
# Select the node under test.
NODE_UNDER_TEST = "NODE_UNDER_TEST"
class EndpointSelectionStrategy(Enum):
"""Enum which defines which endpoint to select from each storage node"""
# Select All endpoints.
ALL = "ALL"
# Select first endpoint from node
FIRST = "FIRST"
class K6ProcessAllocationStrategy(Enum):
"""Enum which defines how K6 processes should be allocated"""
# Each load node will get one k6 process with all endpoints (Default)
PER_LOAD_NODE = "PER_LOAD_NODE"
# Each endpoint will get its own k6 process regardless of the number of load nodes.
# If there are not enough load nodes, some nodes may run multiple k6 processes
PER_ENDPOINT = "PER_ENDPOINT"
class MetaConfig:
def _get_field_formatter(self, field_name: str) -> Callable | None:
data_fields = fields(self)
formatters = [
field.metadata["formatter"]
for field in data_fields
if field.name == field_name and field.metadata.get("formatter") is not None
]
if formatters:
return formatters[0]
return None
def __setattr__(self, field_name, value):
formatter = self._get_field_formatter(field_name)
if formatter:
value = formatter(value)
super().__setattr__(field_name, value)
@dataclass
class Preset(MetaConfig):
# ------ COMMON ------
# Amount of objects which should be created
objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None, False)
# Preset json. Filled automatically.
pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False)
# Workers count for preset
workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False)
# TODO: Deprecated. Acl for container/buckets
acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False)
# APE rule for containers instead of deprecated ACL
rule: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "rule", None, False, formatter=force_list)
# ------ GRPC ------
# Amount of containers which should be created
containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None, False)
# Container placement policy for containers for gRPC
container_placement_policy: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "policy", None, False, formatter=force_list)
# Number of retries for creation of container
container_creation_retry: Optional[int] = metadata_field(grpc_preset_scenarios, "retry", None, False)
# ------ S3 ------
# Amount of buckets which should be created
buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None, False)
# S3 region (AKA placement policy for S3 buckets)
s3_location: Optional[list[str]] = metadata_field(s3_preset_scenarios, "location", None, False, formatter=force_list)
# Delay between containers creation and object upload for preset
object_upload_delay: Optional[int] = metadata_field(all_load_scenarios, "sleep", None, False)
# Flag to control preset errors
ignore_errors: Optional[bool] = metadata_field(all_load_scenarios, "ignore-errors", None, False)
# Flag to ensure created containers store data on local endpoints
local: Optional[bool] = metadata_field(grpc_preset_scenarios, "local", None, False)
@dataclass
class PrometheusParams(MetaConfig):
# Prometheus server URL
server_url: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_SERVER_URL", string_repr=False)
# Prometheus trend stats
trend_stats: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_TREND_STATS", string_repr=False)
# Additional tags
metrics_tags: Optional[str] = metadata_field(all_load_scenarios, None, "METRIC_TAGS", False)
@dataclass
class LoadParams(MetaConfig):
# ------- CONTROL PARAMS -------
# Load type can be gRPC, HTTP, S3.
load_type: LoadType
# Load scenario from k6 scenarios
scenario: Optional[LoadScenario] = None
# Strategy to select nodes under load. See NodesSelectionStrategy class for more details.
# default is ALL_EXCEPT_UNDER_TEST
nodes_selection_strategy: Optional[NodesSelectionStrategy] = None
# Strategy which defines which endpoint to select from each storage node
endpoint_selection_strategy: Optional[EndpointSelectionStrategy] = None
# Strategy which defines how K6 processes should be allocated
k6_process_allocation_strategy: Optional[K6ProcessAllocationStrategy] = None
# Set to True to verify uploaded objects after the K6 load finishes. Default is True.
verify: Optional[bool] = None
# Id for the load run, to distinguish runs from one another. Filled automatically.
load_id: Optional[str] = None
# Acceptable number of load errors in %
# 100 means 100% errors allowed
# 1.5 means 1.5% errors allowed
# 0 means no errors allowed
error_threshold: Optional[float] = None
# Working directory
working_dir: Optional[str] = None
# Preset for the k6 run
preset: Optional[Preset] = None
# K6 download url
k6_url: Optional[str] = None
# Requests module url
requests_module_url: Optional[str] = None
# aws cli download url
awscli_url: Optional[str] = None
# No ssl verification flag
no_verify_ssl: Optional[bool] = metadata_field(
[
LoadScenario.S3,
LoadScenario.S3_CAR,
LoadScenario.S3_MULTIPART,
LoadScenario.S3_LOCAL,
LoadScenario.VERIFY,
LoadScenario.HTTP,
],
"no-verify-ssl",
"NO_VERIFY_SSL",
False,
)
# Percentage of filling of all data disks on all nodes
fill_percent: Optional[float] = None
# If specified, the max payload size in GB of the storage engine. If the storage engine is already full, no new objects will be saved.
max_total_size_gb: Optional[float] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "MAX_TOTAL_SIZE_GB")
# If set, the payload is generated on the fly and is not read fully into memory.
streaming: Optional[int] = metadata_field(all_load_scenarios, None, "STREAMING", False)
# Output format
output: Optional[str] = metadata_field(all_load_scenarios, None, "K6_OUT", False)
# Prometheus params
prometheus: Optional[PrometheusParams] = None
# ------- COMMON SCENARIO PARAMS -------
# Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value.
load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION", False, formatter=convert_time_to_seconds)
# Object size in KB for load and preset.
object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False)
# For read operations, controls which source the objects to read are taken from
read_from: Optional[ReadFrom] = None
# For read operations from REGISTRY, controls how long an object must live before it is used for a read operation
read_age: Optional[int] = metadata_field(all_load_scenarios, None, "READ_AGE", False)
# Output registry K6 file. Filled automatically.
registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False)
# In case we want to reuse a registry file left from another load run
custom_registry: Optional[str] = None
# Flag to delete the existing custom registry file and start with a fresh one
force_fresh_registry: Optional[bool] = None
# Specifies the minimum duration of every single execution (i.e. iteration).
# Any iterations that are shorter than this value will cause that VU to
# sleep for the remainder of the time until the specified minimum duration is reached.
min_iteration_duration: Optional[str] = metadata_field(all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False)
# Prepare/cut objects locally on client before sending
prepare_locally: Optional[bool] = metadata_field([LoadScenario.gRPC, LoadScenario.gRPC_CAR], None, "PREPARE_LOCALLY", False)
# Specifies K6 setupTimeout time. Currently hardcoded in xk6 as 5 seconds for all scenarios
# https://k6.io/docs/using-k6/k6-options/reference/#setup-timeout
setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT", False)
# Initialization time for each VU for k6 load
vu_init_time: Optional[float] = None
# ------- CONSTANT VUS SCENARIO PARAMS -------
# Amount of Writers VU.
writers: Optional[int] = metadata_field(constant_vus_scenarios, None, "WRITERS", True, True)
# Amount of Readers VU.
readers: Optional[int] = metadata_field(constant_vus_scenarios, None, "READERS", True, True)
# Amount of Deleters VU.
deleters: Optional[int] = metadata_field(constant_vus_scenarios, None, "DELETERS", True, True)
# ------- CONSTANT ARRIVAL RATE SCENARIO PARAMS -------
# Number of iterations to start during each timeUnit period for write.
write_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "WRITE_RATE", True, True)
# Number of iterations to start during each timeUnit period for read.
read_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "READ_RATE", True, True)
# Number of iterations to start during each timeUnit period for delete.
delete_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "DELETE_RATE", True, True)
# Amount of preAllocatedVUs for write operations.
preallocated_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True, True)
# Amount of maxVUs for write operations.
max_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_WRITERS", False, True)
# Amount of preAllocatedVUs for read operations.
preallocated_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True, True)
# Amount of maxVUs for read operations.
max_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_READERS", False, True)
# Amount of preAllocatedVUs for delete operations.
preallocated_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True, True)
# Amount of maxVUs for delete operations.
max_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True)
# Multipart
# Number of parts to upload in parallel
writers_multipart: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITERS_MULTIPART", False, True)
# Part size must be greater than 5 MB
write_object_part_size: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False)
# Period of time to apply the rate value.
time_unit: Optional[str] = metadata_field(constant_arrival_rate_scenarios, None, "TIME_UNIT", False)
# ------- VERIFY SCENARIO PARAMS -------
# Maximum verification time for k6 to verify objects. Default is BACKGROUND_LOAD_MAX_VERIFY_TIME (3600).
verify_time: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "TIME_LIMIT", False)
# Amount of Verification VU.
verify_clients: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "CLIENTS", True, False)
# ------- LOCAL SCENARIO PARAMS -------
# Config file location (filled automatically)
config_file: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_FILE", False)
# Config directory location (filled automatically)
config_dir: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_DIR", False)
def set_id(self, load_id):
self.load_id = load_id
if self.read_from == ReadFrom.REGISTRY:
self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt")
# For now it's okay to have it this way
if self.custom_registry is not None:
self.registry_file = self.custom_registry
if self.read_from == ReadFrom.PRESET:
self.registry_file = None
if self.preset:
self.preset.pregen_json = os.path.join(self.working_dir, f"{load_id}_prepare.json")
def get_k6_vars(self):
env_vars = {
meta_field.metadata["scenario_variable"]: meta_field.value
for meta_field in self._get_meta_fields(self)
if self.scenario in meta_field.metadata["applicable_scenarios"]
and meta_field.metadata["scenario_variable"]
and meta_field.value is not None
}
return env_vars
def get_env_vars(self):
env_vars = {
meta_field.metadata["env_variable"]: meta_field.value
for meta_field in self._get_meta_fields(self)
if self.scenario in meta_field.metadata["applicable_scenarios"]
and meta_field.metadata["env_variable"]
and meta_field.value is not None
}
return env_vars
def __post_init__(self):
default_scenario_map = {
LoadType.gRPC: LoadScenario.gRPC,
LoadType.HTTP: LoadScenario.HTTP,
LoadType.S3: LoadScenario.S3,
}
if self.scenario is None:
self.scenario = default_scenario_map[self.load_type]
def get_preset_arguments(self):
command_args = [
self._get_preset_argument(meta_field)
for meta_field in self._get_meta_fields(self)
if self.scenario in meta_field.metadata["applicable_scenarios"]
and meta_field.metadata["preset_argument"]
and meta_field.value is not None
and self._get_preset_argument(meta_field)
]
return command_args
def get_init_time(self) -> int:
return math.ceil(self._get_total_vus() * self.vu_init_time)
def _get_total_vus(self) -> int:
vu_fields = ["writers", "preallocated_writers", "readers", "preallocated_readers"]
data_fields = [getattr(self, field.name) or 0 for field in fields(self) if field.name in vu_fields]
return sum(data_fields)
def _get_applicable_fields(self):
applicable_fields = [
meta_field
for meta_field in self._get_meta_fields(self)
if self.scenario in meta_field.metadata["applicable_scenarios"] and meta_field.value
]
return applicable_fields
@staticmethod
def _get_preset_argument(meta_field: MetaField) -> str:
if isinstance(meta_field.value, bool):
# For preset calls, bool values are passed with just --<argument_name> if the value is True
return f"--{meta_field.metadata['preset_argument']}" if meta_field.value else ""
if isinstance(meta_field.value, list):
return (
" ".join(f"--{meta_field.metadata['preset_argument']} '{value}'" for value in meta_field.value) if meta_field.value else ""
)
return f"--{meta_field.metadata['preset_argument']} '{meta_field.value}'"
@staticmethod
def _get_meta_fields(instance) -> list[MetaField]:
data_fields = fields(instance)
fields_with_data = [
MetaField(field.name, field.metadata, getattr(instance, field.name))
for field in data_fields
if field.metadata and getattr(instance, field.name) is not None
]
for field in data_fields:
actual_field_type = get_args(field.type)[0] if len(get_args(field.type)) else get_args(field.type)
if is_dataclass(actual_field_type) and getattr(instance, field.name):
fields_with_data += LoadParams._get_meta_fields(getattr(instance, field.name))
return fields_with_data or []
def __str__(self) -> str:
load_type_str = self.scenario.value if self.scenario else self.load_type.value
# TODO: migrate load_params defaults to testlib
if self.object_size is not None:
size, unit = calc_unit(self.object_size, 1)
static_params = [f"{load_type_str} {size:.4g} {unit}"]
else:
static_params = [f"{load_type_str}"]
dynamic_params = [
f"{meta_field.name}={meta_field.value}" for meta_field in self._get_applicable_fields() if meta_field.metadata["string_repr"]
]
params = ", ".join(static_params + dynamic_params)
return params
def __repr__(self) -> str:
return self.__str__()
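
A minimal construction sketch for the dataclass above (all values illustrative):

load_params = LoadParams(load_type=LoadType.S3, object_size=8, writers=50, load_time=300)
# __post_init__ maps LoadType.S3 to LoadScenario.S3; for that scenario
# get_k6_vars() would include WRITE_OBJ_SIZE=8, WRITERS=50 and DURATION=300.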

View file

@ -1,243 +0,0 @@
from abc import ABC
from typing import Any, Optional
from frostfs_testlib.load.load_config import LoadScenario
class OperationMetric(ABC):
_NAME = ""
_SUCCESS = ""
_ERRORS = ""
_THROUGHPUT = ""
_LATENCY = ""
def __init__(self, summary) -> None:
self.summary = summary
self.metrics = summary["metrics"]
@property
def total_iterations(self) -> int:
return self._get_metric(self._SUCCESS) + self._get_metric(self._ERRORS)
@property
def success_iterations(self) -> int:
return self._get_metric(self._SUCCESS)
@property
def latency(self) -> dict:
return self._get_metric(self._LATENCY)
@property
def rate(self) -> float:
return self._get_metric_rate(self._SUCCESS)
@property
def failed_iterations(self) -> int:
return self._get_metric(self._ERRORS)
@property
def throughput(self) -> float:
return self._get_metric_rate(self._THROUGHPUT)
@property
def total_bytes(self) -> float:
return self._get_metric(self._THROUGHPUT)
def _get_metric(self, metric: str) -> int:
metrics_method_map = {
"counter": self._get_counter_metric,
"gauge": self._get_gauge_metric,
"trend": self._get_trend_metrics,
}
if metric not in self.metrics:
return 0
metric = self.metrics[metric]
metric_type = metric["type"]
if metric_type not in metrics_method_map:
raise Exception(f"Unsupported metric type: {metric_type}, supported: {metrics_method_map.keys()}")
return metrics_method_map[metric_type](metric)
def _get_metric_rate(self, metric: str) -> float:
metrics_method_map = {"counter": self._get_counter_metric_rate}
if metric not in self.metrics:
return 0
metric = self.metrics[metric]
metric_type = metric["type"]
if metric_type not in metrics_method_map:
raise Exception(f"Unsupported rate metric type: {metric_type}, supported: {metrics_method_map.keys()}")
return metrics_method_map[metric_type](metric)
def _get_counter_metric_rate(self, metric: dict) -> float:
return metric["values"]["rate"]
def _get_counter_metric(self, metric: dict) -> int:
return metric["values"]["count"]
def _get_gauge_metric(self, metric: dict) -> int:
return metric["values"]["value"]
def _get_trend_metrics(self, metric: dict) -> dict:
return metric["values"]
class WriteOperationMetric(OperationMetric):
_NAME = "Write"
_SUCCESS = ""
_ERRORS = ""
_THROUGHPUT = "data_sent"
_LATENCY = ""
class ReadOperationMetric(OperationMetric):
_NAME = "Read"
_SUCCESS = ""
_ERRORS = ""
_THROUGHPUT = "data_received"
_LATENCY = ""
class DeleteOperationMetric(OperationMetric):
_NAME = "Delete"
_SUCCESS = ""
_ERRORS = ""
_THROUGHPUT = ""
_LATENCY = ""
class GrpcWriteOperationMetric(WriteOperationMetric):
_SUCCESS = "frostfs_obj_put_success"
_ERRORS = "frostfs_obj_put_fails"
_LATENCY = "frostfs_obj_put_duration"
class GrpcReadOperationMetric(ReadOperationMetric):
_SUCCESS = "frostfs_obj_get_success"
_ERRORS = "frostfs_obj_get_fails"
_LATENCY = "frostfs_obj_get_duration"
class GrpcDeleteOperationMetric(DeleteOperationMetric):
_SUCCESS = "frostfs_obj_delete_success"
_ERRORS = "frostfs_obj_delete_fails"
_LATENCY = "frostfs_obj_delete_duration"
class S3WriteOperationMetric(WriteOperationMetric):
_SUCCESS = "aws_obj_put_success"
_ERRORS = "aws_obj_put_fails"
_LATENCY = "aws_obj_put_duration"
class S3ReadOperationMetric(ReadOperationMetric):
_SUCCESS = "aws_obj_get_success"
_ERRORS = "aws_obj_get_fails"
_LATENCY = "aws_obj_get_duration"
class S3DeleteOperationMetric(DeleteOperationMetric):
_SUCCESS = "aws_obj_delete_success"
_ERRORS = "aws_obj_delete_fails"
_LATENCY = "aws_obj_delete_duration"
class S3LocalWriteOperationMetric(WriteOperationMetric):
_SUCCESS = "s3local_obj_put_success"
_ERRORS = "s3local_obj_put_fails"
_LATENCY = "s3local_obj_put_duration"
class S3LocalReadOperationMetric(ReadOperationMetric):
_SUCCESS = "s3local_obj_get_success"
_ERRORS = "s3local_obj_get_fails"
_LATENCY = "s3local_obj_get_duration"
class LocalWriteOperationMetric(WriteOperationMetric):
_SUCCESS = "local_obj_put_success"
_ERRORS = "local_obj_put_fails"
_LATENCY = "local_obj_put_duration"
class LocalReadOperationMetric(ReadOperationMetric):
_SUCCESS = "local_obj_get_success"
_ERRORS = "local_obj_get_fails"
class LocalDeleteOperationMetric(DeleteOperationMetric):
_SUCCESS = "local_obj_delete_success"
_ERRORS = "local_obj_delete_fails"
class VerifyReadOperationMetric(ReadOperationMetric):
_SUCCESS = "verified_obj"
_ERRORS = "invalid_obj"
class MetricsBase(ABC):
def __init__(self) -> None:
self.write: Optional[WriteOperationMetric] = None
self.read: Optional[ReadOperationMetric] = None
self.delete: Optional[DeleteOperationMetric] = None
@property
def operations(self) -> list[OperationMetric]:
return [metric for metric in [self.write, self.read, self.delete] if metric is not None]
class GrpcMetrics(MetricsBase):
def __init__(self, summary) -> None:
super().__init__()
self.write = GrpcWriteOperationMetric(summary)
self.read = GrpcReadOperationMetric(summary)
self.delete = GrpcDeleteOperationMetric(summary)
class S3Metrics(MetricsBase):
def __init__(self, summary) -> None:
super().__init__()
self.write = S3WriteOperationMetric(summary)
self.read = S3ReadOperationMetric(summary)
self.delete = S3DeleteOperationMetric(summary)
class S3LocalMetrics(MetricsBase):
def __init__(self, summary) -> None:
super().__init__()
self.write = S3LocalWriteOperationMetric(summary)
self.read = S3LocalReadOperationMetric(summary)
class LocalMetrics(MetricsBase):
def __init__(self, summary) -> None:
super().__init__()
self.write = LocalWriteOperationMetric(summary)
self.read = LocalReadOperationMetric(summary)
self.delete = LocalDeleteOperationMetric(summary)
class VerifyMetrics(MetricsBase):
def __init__(self, summary) -> None:
super().__init__()
self.read = VerifyReadOperationMetric(summary)
def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> MetricsBase:
class_map = {
LoadScenario.gRPC: GrpcMetrics,
LoadScenario.gRPC_CAR: GrpcMetrics,
LoadScenario.HTTP: GrpcMetrics,
LoadScenario.S3: S3Metrics,
LoadScenario.S3_CAR: S3Metrics,
LoadScenario.S3_MULTIPART: S3Metrics,
LoadScenario.S3_LOCAL: S3LocalMetrics,
LoadScenario.VERIFY: VerifyMetrics,
LoadScenario.LOCAL: LocalMetrics,
}
return class_map[load_type](summary)
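
A short sketch of reading metrics from a k6 summary (the summary dict is assumed
to come from K6.get_results()):

metrics = get_metrics_object(LoadScenario.gRPC, summary)
print(metrics.write.success_iterations)  # "frostfs_obj_put_success" counter
print(metrics.write.failed_iterations)   # "frostfs_obj_put_fails" counter
print(metrics.write.latency)             # "frostfs_obj_put_duration" trend values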

View file

@ -1,178 +0,0 @@
from datetime import datetime
from typing import Optional
import yaml
from frostfs_testlib.load.interfaces.summarized import SummarizedStats
from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario
from frostfs_testlib.utils.converting_utils import calc_unit
class LoadReport:
def __init__(self, load_test) -> None:
self.load_test = load_test
# List of load summaries dicts, one per load run
self.load_summaries_list: list[dict] = []
self.load_params: Optional[LoadParams] = None
self.start_time: Optional[datetime] = None
self.end_time: Optional[datetime] = None
def set_start_time(self, time: datetime = None):
if time is None:
time = datetime.utcnow()
self.start_time = time
def set_end_time(self, time: datetime = None):
if time is None:
time = datetime.utcnow()
self.end_time = time
def add_summaries(self, load_summaries: dict):
self.load_summaries_list.append(load_summaries)
def set_load_params(self, load_params: LoadParams):
self.load_params = load_params
def get_report_html(self):
report_sections = [
[self.load_params, self._get_load_id_section_html],
[self.load_test, self._get_load_params_section_html],
[self.load_summaries_list, self._get_totals_section_html],
[self.end_time, self._get_test_time_html],
]
html = ""
for section in report_sections:
if section[0] is not None:
html += section[1]()
return html
def _get_load_params_section_html(self) -> str:
params: str = yaml.safe_dump([self.load_test], sort_keys=False, indent=2, explicit_start=True)
params = params.replace("\n", "<br>").replace(" ", "&nbsp;")
section_html = f"""<h3>Scenario params</h3>
<pre>{params}</pre>
<hr>"""
return section_html
def _get_load_id_section_html(self) -> str:
section_html = f"""<h3>Load ID: {self.load_params.load_id}</h3>
<hr>"""
return section_html
def _get_test_time_html(self) -> str:
if not self.start_time or not self.end_time:
return ""
html = f"""<h3>Scenario duration</h3>
{self.start_time} - {self.end_time}<br>
<hr>
"""
return html
def _seconds_to_formatted_duration(self, seconds: int) -> str:
"""Converts N number of seconds to formatted output ignoring zeroes.
Examples:
186399 -> "2d3h46m39s"
86399 -> "23h59m59s"
86399 -> "23h59m59s"
3605 -> "1h5s"
123 -> "2m3s"
"""
units = {"d": 86400, "h": 3600, "m": 60, "s": 1}
parts = []
remaining = seconds
for divisor in units.values():
part = remaining // divisor
remaining -= divisor * part
parts.append(part)
return "".join([f"{val}{unit}" for unit, val in zip(units, parts) if val > 0])
def _row(self, caption: str, value: str) -> str:
return f"<tr><th>{caption}</th><td>{value}</td></tr>"
def _get_model_string(self):
if self.load_params.min_iteration_duration is not None:
return f"min_iteration_duration={self.load_params.min_iteration_duration}"
model_map = {
LoadScenario.gRPC: "closed model",
LoadScenario.S3: "closed model",
LoadScenario.S3_MULTIPART: "closed model",
LoadScenario.HTTP: "closed model",
LoadScenario.gRPC_CAR: "open model",
LoadScenario.S3_CAR: "open model",
LoadScenario.LOCAL: "local fill",
LoadScenario.S3_LOCAL: "local fill",
}
return model_map[self.load_params.scenario]
def _get_operations_sub_section_html(self, operation_type: str, stats: SummarizedStats):
throughput_html = ""
if stats.throughput > 0:
throughput, unit = calc_unit(stats.throughput)
throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec")
bytes_html = ""
if stats.total_bytes > 0:
total_bytes, total_bytes_unit = calc_unit(stats.total_bytes)
bytes_html = self._row("Total transferred", f"{total_bytes:.2f} {total_bytes_unit}")
per_node_errors_html = ""
for node_key, errors in stats.errors.by_node.items():
if self.load_params.k6_process_allocation_strategy == K6ProcessAllocationStrategy.PER_ENDPOINT:
per_node_errors_html += self._row(f"At {node_key}", errors)
latency_html = ""
for node_key, latencies in stats.latencies.by_node.items():
latency_values = "N/A"
if latencies:
latency_values = ""
for param_name, param_val in latencies.items():
latency_values += f"{param_name}={param_val:.2f}ms "
latency_html += self._row(f"{operation_type} latency {node_key.split(':')[0]}", latency_values)
object_size, object_size_unit = calc_unit(self.load_params.object_size, 1)
duration = self._seconds_to_formatted_duration(self.load_params.load_time)
model = self._get_model_string()
requested_rate_str = f"{stats.requested_rate}op/sec" if stats.requested_rate else ""
# write 8KB 15h49m 50op/sec 50th open model/closed model/min_iteration duration=1s - 1.636MB/s 199.57451/s
short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {stats.threads}th {model} - {throughput:.2f}{unit}/s {stats.rate:.2f}/s"
html = f"""
<table border="1" cellpadding="5px"><tbody>
<tr><th colspan="2" bgcolor="gainsboro">{short_summary}</th></tr>
<tr><th colspan="2" bgcolor="gainsboro">Metrics</th></tr>
{self._row("Total operations", stats.operations)}
{self._row("OP/sec", f"{stats.rate:.2f}")}
{bytes_html}
{throughput_html}
{latency_html}
<tr><th colspan="2" bgcolor="gainsboro">Errors</th></tr>
{per_node_errors_html}
{self._row("Total", f"{stats.errors.total} ({stats.errors.percent:.2f}%)")}
{self._row("Threshold", f"{stats.errors.threshold:.2f}%")}
</tbody></table><br><hr>
"""
return html
def _get_totals_section_html(self):
html = ""
for i in range(len(self.load_summaries_list)):
html += f"<h3>Load Results for load #{i+1}</h3>"
summarized = SummarizedStats.collect(self.load_params, self.load_summaries_list[i])
for operation_type, stats in summarized.items():
if stats.operations:
html += self._get_operations_sub_section_html(operation_type, stats)
return html
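
A sketch of how the report above is assembled (load_test is whatever object the
test framework passes in; load_summaries comes from a runner):

report = LoadReport(load_test)
report.set_load_params(load_params)
report.set_start_time()
# ... run the load ...
report.set_end_time()
report.add_summaries(load_summaries)
html = report.get_report_html()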

View file

@ -1,68 +0,0 @@
from frostfs_testlib import reporter
from frostfs_testlib.load.interfaces.summarized import SummarizedStats
from frostfs_testlib.load.load_config import LoadParams, LoadScenario
from frostfs_testlib.load.load_metrics import get_metrics_object
class LoadVerifier:
def __init__(self, load_params: LoadParams) -> None:
self.load_params = load_params
def collect_load_issues(self, load_summaries: dict[str, dict]) -> list[str]:
summarized = SummarizedStats.collect(self.load_params, load_summaries)
issues = []
for operation_type, stats in summarized.items():
if stats.threads and not stats.operations:
issues.append(f"No any {operation_type.lower()} operation was performed")
if stats.errors.percent > stats.errors.threshold:
rate_str = self._get_rate_str(stats.errors.percent)
issues.append(f"{operation_type} errors exceeded threshold: {rate_str} > {stats.errors.threshold}%")
return issues
def collect_verify_issues(self, load_summaries, verification_summaries) -> list[str]:
verify_issues: list[str] = []
for k6_process_label in load_summaries:
with reporter.step(f"Check verify scenario results for {k6_process_label}"):
verify_issues.extend(
self._collect_verify_issues_on_process(
k6_process_label,
load_summaries[k6_process_label],
verification_summaries[k6_process_label],
)
)
return verify_issues
def _get_rate_str(self, rate: float, minimal: float = 0.01) -> str:
return f"{rate:.2f}%" if rate >= minimal else f"~{minimal}%"
def _collect_verify_issues_on_process(self, label, load_summary, verification_summary) -> list[str]:
issues = []
load_metrics = get_metrics_object(self.load_params.scenario, load_summary)
writers = self.load_params.writers or self.load_params.preallocated_writers or 0
deleters = self.load_params.deleters or self.load_params.preallocated_deleters or 0
delete_success = 0
if deleters > 0:
delete_success = load_metrics.delete.success_iterations
if verification_summary:
verify_metrics = get_metrics_object(LoadScenario.VERIFY, verification_summary)
verified_objects = verify_metrics.read.success_iterations
invalid_objects = verify_metrics.read.failed_iterations
total_left_objects = load_metrics.write.success_iterations - delete_success
if invalid_objects > 0:
issues.append(f"There were {invalid_objects} verification fails (hash mismatch).")
# Due to interruptions, the number of verified objects may be less than the number written, by up to the writers count
if abs(total_left_objects - verified_objects) > writers:
issues.append(
f"Verified objects mismatch for {label}. Total: {total_left_objects}, Verified: {verified_objects}. Writers: {writers}."
)
return issues
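
A sketch of how the verifier is typically driven (load_summaries comes from the
load run, verification_summaries from a follow-up VERIFY scenario run):

verifier = LoadVerifier(load_params)
issues = verifier.collect_load_issues(load_summaries)
issues.extend(verifier.collect_verify_issues(load_summaries, verification_summaries))
assert not issues, "\n".join(issues)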

View file

@ -1,60 +0,0 @@
from frostfs_testlib.load.interfaces.loader import Loader
from frostfs_testlib.resources.load_params import (
LOAD_NODE_SSH_PASSWORD,
LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE,
LOAD_NODE_SSH_PRIVATE_KEY_PATH,
LOAD_NODE_SSH_USER,
)
from frostfs_testlib.shell.interfaces import Shell, SshCredentials
from frostfs_testlib.shell.ssh_shell import SSHShell
from frostfs_testlib.storage.cluster import ClusterNode
class RemoteLoader(Loader):
def __init__(self, ssh_credentials: SshCredentials, ip: str) -> None:
self.ssh_credentials = ssh_credentials
self._ip = ip
@property
def ip(self):
return self._ip
def get_shell(self) -> Shell:
ssh_client = SSHShell(
host=self.ip,
login=self.ssh_credentials.ssh_login,
password=self.ssh_credentials.ssh_password,
private_key_path=self.ssh_credentials.ssh_key_path,
private_key_passphrase=self.ssh_credentials.ssh_key_passphrase,
)
return ssh_client
@classmethod
def from_ip_list(cls, ip_list: list[str]) -> list[Loader]:
loaders: list[Loader] = []
ssh_credentials = SshCredentials(
LOAD_NODE_SSH_USER,
LOAD_NODE_SSH_PASSWORD,
LOAD_NODE_SSH_PRIVATE_KEY_PATH,
LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE,
)
for ip in ip_list:
loaders.append(RemoteLoader(ssh_credentials, ip))
return loaders
class NodeLoader(Loader):
"""When ClusterNode is the loader for itself (for Local scenario only)."""
def __init__(self, cluster_node: ClusterNode) -> None:
self.cluster_node = cluster_node
def get_shell(self) -> Shell:
return self.cluster_node.host.get_shell()
@property
def ip(self):
return self.cluster_node.host_ip
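
Both loader flavors expose the same interface, so runners can treat them
uniformly (the IP addresses are illustrative):

remote_loaders = RemoteLoader.from_ip_list(["10.78.70.1", "10.78.70.2"])
shell = remote_loaders[0].get_shell()   # SSH shell built from LOAD_NODE_SSH_* params

node_loader = NodeLoader(cluster_node)  # for the LOCAL scenario only
shell = node_loader.get_shell()         # shell of the cluster node itself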

View file

@ -1,466 +0,0 @@
import copy
import itertools
import math
import time
from dataclasses import fields
from threading import Event
from typing import Optional
from urllib.parse import urlparse
from frostfs_testlib import reporter
from frostfs_testlib.credentials.interfaces import S3Credentials, User
from frostfs_testlib.load.interfaces.loader import Loader
from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner
from frostfs_testlib.load.k6 import K6
from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType
from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader
from frostfs_testlib.resources import optionals
from frostfs_testlib.resources.common import STORAGE_USER_NAME
from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_VUS_COUNT_DIVISOR, LOAD_NODE_SSH_USER, LOAD_NODES
from frostfs_testlib.shell.command_inspectors import SuInspector
from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode
from frostfs_testlib.testing import parallel, run_optionally
from frostfs_testlib.testing.test_control import retry
from frostfs_testlib.utils import datetime_utils
from frostfs_testlib.utils.file_keeper import FileKeeper
class RunnerBase(ScenarioRunner):
k6_instances: list[K6]
loaders: list[Loader]
@reporter.step("Run preset on loaders")
def preset(self):
parallel([k6.preset for k6 in self.k6_instances])
@reporter.step("Wait until load finish")
def wait_until_finish(self, soft_timeout: int = 0):
event = Event()
parallel([k6.wait_until_finished for k6 in self.k6_instances], event=event, soft_timeout=soft_timeout)
@property
def is_running(self):
futures = parallel([k6.is_running for k6 in self.k6_instances])
return any([future.result() for future in futures])
def get_k6_instances(self):
return self.k6_instances
def get_loaders(self) -> list[Loader]:
return self.loaders
class DefaultRunner(RunnerBase):
user: User
def __init__(
self,
user: User,
load_ip_list: Optional[list[str]] = None,
) -> None:
if load_ip_list is None:
load_ip_list = LOAD_NODES
self.loaders = RemoteLoader.from_ip_list(load_ip_list)
self.user = user
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step("Preparation steps")
def prepare(
self,
load_params: LoadParams,
cluster_nodes: list[ClusterNode],
nodes_under_load: list[ClusterNode],
k6_dir: str,
):
if load_params.force_fresh_registry and load_params.custom_registry:
with reporter.step("Forcing fresh registry files"):
parallel(self._force_fresh_registry, self.loaders, load_params)
if load_params.load_type != LoadType.S3:
return
with reporter.step("Init s3 client on loaders"):
s3_credentials = self.user.s3_credentials
parallel(self._aws_configure_on_loader, self.loaders, s3_credentials)
def _force_fresh_registry(self, loader: Loader, load_params: LoadParams):
with reporter.step(f"Forcing fresh registry on {loader.ip}"):
shell = loader.get_shell()
shell.exec(f"rm -f {load_params.registry_file}")
def _aws_configure_on_loader(
self,
loader: Loader,
s3_credentials: S3Credentials,
):
with reporter.step(f"Aws configure on {loader.ip}"):
configure_input = [
InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=s3_credentials.access_key),
InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=s3_credentials.secret_key),
InteractiveInput(prompt_pattern=r".*", input=""),
InteractiveInput(prompt_pattern=r".*", input=""),
]
loader.get_shell().exec("aws configure", CommandOptions(interactive_inputs=configure_input))
@reporter.step("Init k6 instances")
def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
self.k6_instances = []
cycled_loaders = itertools.cycle(self.loaders)
k6_distribution_count = {
K6ProcessAllocationStrategy.PER_LOAD_NODE: len(self.loaders),
K6ProcessAllocationStrategy.PER_ENDPOINT: len(endpoints),
}
endpoints_generators = {
K6ProcessAllocationStrategy.PER_LOAD_NODE: itertools.cycle([endpoints]),
K6ProcessAllocationStrategy.PER_ENDPOINT: itertools.cycle([[endpoint] for endpoint in endpoints]),
}
k6_processes_count = k6_distribution_count[load_params.k6_process_allocation_strategy]
endpoints_gen = endpoints_generators[load_params.k6_process_allocation_strategy]
distributed_load_params_list = self._get_distributed_load_params_list(load_params, k6_processes_count)
futures = parallel(
self._init_k6_instance,
distributed_load_params_list,
loader=cycled_loaders,
endpoints=endpoints_gen,
k6_dir=k6_dir,
)
self.k6_instances = [future.result() for future in futures]
def _init_k6_instance(self, load_params_for_loader: LoadParams, loader: Loader, endpoints: list[str], k6_dir: str):
shell = loader.get_shell()
with reporter.step(f"Init K6 instance on {loader.ip} for endpoints {endpoints}"):
with reporter.step(f"Make working directory"):
shell.exec(f"sudo mkdir -p {load_params_for_loader.working_dir}")
shell.exec(f"sudo chown {LOAD_NODE_SSH_USER} {load_params_for_loader.working_dir}")
return K6(
load_params_for_loader,
endpoints,
k6_dir,
shell,
loader,
self.user,
)
def _get_distributed_load_params_list(self, original_load_params: LoadParams, workers_count: int) -> list[LoadParams]:
divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR)
distributed_load_params: list[LoadParams] = []
for i in range(workers_count):
load_params = copy.deepcopy(original_load_params)
# Append #i here in case multiple k6 processes go to the same load node
load_params.set_id(f"{load_params.load_id}_{i}")
distributed_load_params.append(load_params)
load_fields = fields(original_load_params)
for field in load_fields:
if (
field.metadata
and original_load_params.scenario in field.metadata["applicable_scenarios"]
and field.metadata["distributed"]
and getattr(original_load_params, field.name) is not None
):
original_value = getattr(original_load_params, field.name)
distribution = self._get_distribution(math.ceil(original_value / divisor), workers_count)
for i in range(workers_count):
setattr(distributed_load_params[i], field.name, distribution[i])
return distributed_load_params
def _get_distribution(self, clients_count: int, workers_count: int) -> list[int]:
"""
This function distributes X clients over Y workers as evenly as possible.
For example, if we have 150 readers (clients) and want to spread them over 4 load nodes (workers),
this will return [38, 38, 37, 37].
Args:
clients_count: number of clients to distribute.
workers_count: number of workers.
Returns:
list with the per-worker distribution.
"""
if workers_count < 1:
raise Exception("Workers cannot be less then 1")
# Amount of guaranteed payload on one worker
clients_per_worker = clients_count // workers_count
# Remainder of clients left to be distributed
remainder = clients_count - clients_per_worker * workers_count
distribution = [clients_per_worker + 1 if i < remainder else clients_per_worker for i in range(workers_count)]
return distribution
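# Worked example: clients_count=150, workers_count=4 gives
# clients_per_worker=37 and remainder=2, hence [38, 38, 37, 37].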
def start(self):
load_params = self.k6_instances[0].load_params
parallel([k6.start for k6 in self.k6_instances])
wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5
with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"):
time.sleep(wait_after_start_time)
def stop(self):
for k6_instance in self.k6_instances:
k6_instance.stop()
def get_results(self) -> dict:
results = {}
for k6_instance in self.k6_instances:
if k6_instance.load_params.k6_process_allocation_strategy is None:
raise RuntimeError("k6_process_allocation_strategy should not be none")
result = k6_instance.get_results()
endpoint = urlparse(k6_instance.endpoints[0]).netloc or k6_instance.endpoints[0]
keys_map = {
K6ProcessAllocationStrategy.PER_LOAD_NODE: k6_instance.loader.ip,
K6ProcessAllocationStrategy.PER_ENDPOINT: endpoint,
}
key = keys_map[k6_instance.load_params.k6_process_allocation_strategy]
results[key] = result
return results
class LocalRunner(RunnerBase):
cluster_state_controller: ClusterStateController
file_keeper: FileKeeper
user: User
def __init__(
self,
cluster_state_controller: ClusterStateController,
file_keeper: FileKeeper,
nodes_under_load: list[ClusterNode],
user: User,
) -> None:
self.cluster_state_controller = cluster_state_controller
self.file_keeper = file_keeper
self.loaders = [NodeLoader(node) for node in nodes_under_load]
self.nodes_under_load = nodes_under_load
self.user = user
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step("Preparation steps")
def prepare(
self,
load_params: LoadParams,
cluster_nodes: list[ClusterNode],
nodes_under_load: list[ClusterNode],
k6_dir: str,
):
parallel(self.prepare_node, nodes_under_load, k6_dir, load_params)
@retry(3, 5, expected_result=True)
def allow_user_to_login_in_system(self, cluster_node: ClusterNode):
shell = cluster_node.host.get_shell()
result = None
try:
shell.exec(f"sudo chsh -s /bin/bash {STORAGE_USER_NAME}")
self.lock_passwd_on_node(cluster_node)
options = CommandOptions(check=False, extra_inspectors=[SuInspector(STORAGE_USER_NAME)])
result = shell.exec("whoami", options)
finally:
if not result or result.return_code:
self.restore_passwd_on_node(cluster_node)
return False
return True
@reporter.step("Prepare node {cluster_node}")
def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams):
shell = cluster_node.host.get_shell()
with reporter.step("Allow storage user to login into system"):
self.allow_user_to_login_in_system(cluster_node)
with reporter.step("Update limits.conf"):
limits_path = "/etc/security/limits.conf"
self.file_keeper.add(cluster_node.storage_node, limits_path)
content = f"{STORAGE_USER_NAME} hard nofile 65536\n{STORAGE_USER_NAME} soft nofile 65536\n"
shell.exec(f"echo '{content}' | sudo tee {limits_path}")
with reporter.step("Download K6"):
shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}")
shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}")
shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz --strip-components 2 -C {k6_dir}")
shell.exec(f"sudo chmod -R 777 {k6_dir}")
with reporter.step("chmod 777 wallet related files on loader"):
shell.exec(f"sudo chmod -R 777 {self.user.wallet.config_path}")
shell.exec(f"sudo chmod -R 777 {self.user.wallet.path}")
@reporter.step("Init k6 instances")
def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
self.k6_instances = []
futures = parallel(
self._init_k6_instance,
self.loaders,
load_params,
k6_dir,
)
self.k6_instances = [future.result() for future in futures]
def _init_k6_instance(self, loader: Loader, load_params: LoadParams, k6_dir: str):
shell = loader.get_shell()
with reporter.step(f"Init K6 instance on {loader.ip}"):
with reporter.step(f"Make working directory"):
shell.exec(f"sudo mkdir -p {load_params.working_dir}")
# If we chmod /home/<user_name> folder we can no longer ssh to the node
# !! IMPORTANT !!
if (
load_params.working_dir
and load_params.working_dir != f"/home/{LOAD_NODE_SSH_USER}"
and load_params.working_dir != f"/home/{LOAD_NODE_SSH_USER}/"
):
shell.exec(f"sudo chmod -R 777 {load_params.working_dir}")
return K6(
load_params,
["localhost:8080"],
k6_dir,
shell,
loader,
self.user,
)
def start(self):
load_params = self.k6_instances[0].load_params
self.cluster_state_controller.stop_services_of_type(S3Gate)
self.cluster_state_controller.stop_services_of_type(StorageNode)
parallel([k6.start for k6 in self.k6_instances])
wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5
with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"):
time.sleep(wait_after_start_time)
@reporter.step("Restore passwd on {cluster_node}")
def restore_passwd_on_node(self, cluster_node: ClusterNode):
shell = cluster_node.host.get_shell()
shell.exec("sudo chattr -i /etc/passwd")
@reporter.step("Lock passwd on {cluster_node}")
def lock_passwd_on_node(self, cluster_node: ClusterNode):
shell = cluster_node.host.get_shell()
shell.exec("sudo chattr +i /etc/passwd")
def stop(self):
for k6_instance in self.k6_instances:
k6_instance.stop()
self.cluster_state_controller.start_all_stopped_services()
def get_results(self) -> dict:
results = {}
for k6_instance in self.k6_instances:
result = k6_instance.get_results()
results[k6_instance.loader.ip] = result
parallel(self.restore_passwd_on_node, self.nodes_under_load)
return results
class S3LocalRunner(LocalRunner):
endpoints: list[str]
k6_dir: str
@reporter.step("Run preset on loaders")
def preset(self):
LocalRunner.preset(self)
with reporter.step(f"Resolve containers in preset"):
parallel(self._resolve_containers_in_preset, self.k6_instances)
@reporter.step("Resolve containers in preset")
def _resolve_containers_in_preset(self, k6_instance: K6):
k6_instance.shell.exec(
f"sudo {self.k6_dir}/scenarios/preset/resolve_containers_in_preset.py --endpoint {k6_instance.endpoints[0]} --preset_file {k6_instance.load_params.preset.pregen_json}"
)
@reporter.step("Init k6 instances")
def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
self.k6_instances = []
futures = parallel(
self._init_k6_instance_,
self.loaders,
load_params,
endpoints,
k6_dir,
)
self.k6_instances = [future.result() for future in futures]
def _init_k6_instance_(self, loader: Loader, load_params: LoadParams, endpoints: list[str], k6_dir: str):
shell = loader.get_shell()
with reporter.step(f"Init K6 instance on {loader.ip} for endpoints {endpoints}"):
with reporter.step(f"Make working directory"):
shell.exec(f"sudo mkdir -p {load_params.working_dir}")
# If we chmod /home/<user_name> folder we can no longer ssh to the node
# !! IMPORTANT !!
if (
load_params.working_dir
and load_params.working_dir != f"/home/{LOAD_NODE_SSH_USER}"
and load_params.working_dir != f"/home/{LOAD_NODE_SSH_USER}/"
):
shell.exec(f"sudo chmod -R 777 {load_params.working_dir}")
return K6(
load_params,
self.endpoints,
k6_dir,
shell,
loader,
self.user,
)
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step("Preparation steps")
def prepare(
self,
load_params: LoadParams,
cluster_nodes: list[ClusterNode],
nodes_under_load: list[ClusterNode],
k6_dir: str,
):
self.k6_dir = k6_dir
parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, cluster_nodes)
@reporter.step("Prepare node {cluster_node}")
def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, cluster_nodes: list[ClusterNode]):
LocalRunner.prepare_node(self, cluster_node, k6_dir, load_params)
self.endpoints = cluster_node.s3_gate.get_all_endpoints()
shell = cluster_node.host.get_shell()
with reporter.step("Uninstall previous installation of aws cli"):
shell.exec(f"sudo rm -rf /usr/local/aws-cli")
shell.exec(f"sudo rm -rf /usr/local/bin/aws")
shell.exec(f"sudo rm -rf /usr/local/bin/aws_completer")
with reporter.step("Install aws cli"):
shell.exec(f"sudo curl {load_params.awscli_url} -o {k6_dir}/awscliv2.zip")
shell.exec(f"sudo unzip -q {k6_dir}/awscliv2.zip -d {k6_dir}")
shell.exec(f"sudo {k6_dir}/aws/install")
with reporter.step("Install requests python module"):
shell.exec(f"sudo apt-get -y install python3-pip")
shell.exec(f"sudo curl -so {k6_dir}/requests.tar.gz {load_params.requests_module_url}")
shell.exec(f"sudo python3 -m pip install -I {k6_dir}/requests.tar.gz")
with reporter.step(f"Init s3 client on {cluster_node.host_ip}"):
configure_input = [
InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=self.user.s3_credentials.access_key),
InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=self.user.s3_credentials.secret_key),
InteractiveInput(prompt_pattern=r".*", input=""),
InteractiveInput(prompt_pattern=r".*", input=""),
]
shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input))

View file

@ -1,32 +0,0 @@
from importlib.metadata import entry_points
from typing import Any
def load_plugin(plugin_group: str, name: str) -> Any:
"""Loads plugin using entry point specification.
Args:
plugin_group: Name of plugin group that contains the plugin.
name: Name of the plugin in the group.
Returns:
Plugin class if the plugin was found; otherwise returns None.
"""
plugins = entry_points(group=plugin_group)
if name not in plugins.names:
return None
plugin = plugins[name]
return plugin.load()
def load_all(group: str) -> Any:
"""Loads all plugins using entry point specification.
Args:
group: Name of plugin group.
Returns:
Classes from specified group.
"""
plugins = entry_points(group=group)
return [plugin.load() for plugin in plugins]
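
A usage sketch; the entry-point group and plugin names here are assumptions for
illustration only:

host_cls = load_plugin("frostfs.testlib.hosting", "docker")
if host_cls is None:
    raise RuntimeError("hosting plugin 'docker' is not installed")
healthchecks = load_all(group="frostfs.testlib.healthcheck")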

View file

@ -1,280 +0,0 @@
from __future__ import annotations
import os
import uuid
from typing import Optional
from tenacity import retry
from tenacity.stop import stop_after_attempt
from tenacity.wait import wait_fixed
from frostfs_testlib import reporter
from frostfs_testlib.shell import Shell
from frostfs_testlib.shell.command_inspectors import SuInspector
from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions
class RemoteProcess:
def __init__(
self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector], proc_id: str
):
self.process_dir = process_dir
self.cmd = cmd
self.stdout_last_line_number = 0
self.stderr_last_line_number = 0
self.pid: Optional[str] = None
self.proc_rc: Optional[int] = None
self.proc_start_time: Optional[int] = None
self.proc_end_time: Optional[int] = None
self.saved_stdout: Optional[str] = None
self.saved_stderr: Optional[str] = None
self.shell = shell
self.proc_id: str = proc_id
self.cmd_inspectors: list[CommandInspector] = [cmd_inspector] if cmd_inspector else []
@classmethod
@reporter.step("Create remote process")
def create(
cls,
command: str,
shell: Shell,
working_dir: str = "/tmp",
user: Optional[str] = None,
proc_id: Optional[str] = None,
) -> RemoteProcess:
"""
Create a process on a remote host.
        A directory is created for the process with the following files:
            command.sh: script to execute
            pid: contains the process id
            rc: contains the script return code
            stderr: contains the script errors
            stdout: contains the script output

        Args:
            shell: Shell instance
            command: command to be run on the remote host
            working_dir: working directory for the process
            user: user on whose behalf the command will be executed
            proc_id: string identifier for the process

        Returns:
            RemoteProcess instance for further examination
"""
if proc_id is None:
proc_id = f"{uuid.uuid4()}"
cmd_inspector = SuInspector(user) if user else None
remote_process = cls(
cmd=command,
process_dir=os.path.join(working_dir, f"proc_{proc_id}"),
shell=shell,
cmd_inspector=cmd_inspector,
proc_id=proc_id,
)
return remote_process
@reporter.step("Start remote process")
def start(self):
"""
Starts a process on a remote host.
"""
self._create_process_dir()
self._generate_command_script()
self._start_process()
self.pid = self._get_pid()
@reporter.step("Get process stdout")
def stdout(self, full: bool = False) -> str:
"""
Method to get process stdout, either fresh info or full.
Args:
            full: if True, returns the full stdout captured up to this moment

        Returns:
            Fresh stdout: stdout_last_line_number tracks what has already been read, so only new lines are returned.
            If the process has finished (proc_rc is not None), the saved stdout is returned.
"""
if self.saved_stdout is not None:
cur_stdout = self.saved_stdout
else:
terminal = self.shell.exec(
f"cat {self.process_dir}/stdout",
options=CommandOptions(no_log=True, extra_inspectors=self.cmd_inspectors),
)
if self.proc_rc is not None:
self.saved_stdout = terminal.stdout
cur_stdout = terminal.stdout
if full:
return cur_stdout
whole_stdout = cur_stdout.split("\n")
if len(whole_stdout) > self.stdout_last_line_number:
resulted_stdout = "\n".join(whole_stdout[self.stdout_last_line_number :])
self.stdout_last_line_number = len(whole_stdout)
return resulted_stdout
return ""
@reporter.step("Get process stderr")
def stderr(self, full: bool = False) -> str:
"""
Method to get process stderr, either fresh info or full.
Args:
            full: if True, returns the full stderr captured up to this moment

        Returns:
            Fresh stderr: stderr_last_line_number tracks what has already been read, so only new lines are returned.
            If the process has finished (proc_rc is not None), the saved stderr is returned.
"""
if self.saved_stderr is not None:
cur_stderr = self.saved_stderr
else:
terminal = self.shell.exec(
f"cat {self.process_dir}/stderr",
options=CommandOptions(no_log=True, extra_inspectors=self.cmd_inspectors),
)
if self.proc_rc is not None:
self.saved_stderr = terminal.stdout
cur_stderr = terminal.stdout
if full:
return cur_stderr
whole_stderr = cur_stderr.split("\n")
if len(whole_stderr) > self.stderr_last_line_number:
resulted_stderr = "\n".join(whole_stderr[self.stderr_last_line_number :])
self.stderr_last_line_number = len(whole_stderr)
return resulted_stderr
return ""
@reporter.step("Get process rc")
def rc(self) -> Optional[int]:
if self.proc_rc is not None:
return self.proc_rc
result = self._cat_proc_file("rc")
if not result:
return None
self.proc_rc = int(result)
return self.proc_rc
@reporter.step("Get process start time")
def start_time(self) -> Optional[int]:
if self.proc_start_time is not None:
return self.proc_start_time
result = self._cat_proc_file("start_time")
if not result:
return None
self.proc_start_time = int(result)
return self.proc_start_time
@reporter.step("Get process end time")
def end_time(self) -> Optional[int]:
if self.proc_end_time is not None:
return self.proc_end_time
result = self._cat_proc_file("end_time")
if not result:
return None
self.proc_end_time = int(result)
return self.proc_end_time
def _cat_proc_file(self, file: str) -> Optional[str]:
terminal = self.shell.exec(
f"cat {self.process_dir}/{file}",
CommandOptions(check=False, extra_inspectors=self.cmd_inspectors, no_log=True),
)
if "No such file or directory" in terminal.stderr:
return None
elif terminal.return_code != 0:
raise AssertionError(f"cat process {file} was not successful: {terminal.stderr}")
return terminal.stdout
@reporter.step("Check if process is running")
def running(self) -> bool:
return self.rc() is None
@reporter.step("Send signal to process")
def send_signal(self, signal: int) -> None:
kill_res = self.shell.exec(
f"kill -{signal} {self.pid}",
CommandOptions(check=False, extra_inspectors=self.cmd_inspectors),
)
if "No such process" in kill_res.stderr:
return
if kill_res.return_code:
raise AssertionError(f"Signal {signal} not sent. Return code of kill: {kill_res.return_code}")
@reporter.step("Stop process")
def stop(self) -> None:
self.send_signal(15)
@reporter.step("Kill process")
def kill(self) -> None:
self.send_signal(9)
@reporter.step("Clear process directory")
def clear(self) -> None:
if self.process_dir == "/":
raise AssertionError(f"Invalid path to delete: {self.process_dir}")
self.shell.exec(f"rm -rf {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors))
@reporter.step("Start remote process")
def _start_process(self) -> None:
self.shell.exec(
f"nohup {self.process_dir}/command.sh </dev/null "
f">{self.process_dir}/stdout "
f"2>{self.process_dir}/stderr &",
CommandOptions(extra_inspectors=self.cmd_inspectors),
)
@reporter.step("Create process directory")
def _create_process_dir(self) -> None:
self.shell.exec(f"mkdir -p {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors))
self.shell.exec(f"chmod 777 {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors))
terminal = self.shell.exec(f"realpath {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors))
self.process_dir = terminal.stdout.strip()
@reporter.step("Get pid")
@retry(wait=wait_fixed(10), stop=stop_after_attempt(5), reraise=True)
def _get_pid(self) -> str:
terminal = self.shell.exec(f"cat {self.process_dir}/pid", CommandOptions(extra_inspectors=self.cmd_inspectors))
assert terminal.stdout, f"invalid pid: {terminal.stdout}"
return terminal.stdout.strip()
@reporter.step("Generate command script")
def _generate_command_script(self) -> None:
        # Escape backslashes before quotes; the original order double-escaped
        # quotes and produced a broken command.sh for commands containing them.
        command = self.cmd.replace("\\", "\\\\").replace('"', '\\"')
        # \\$ keeps the dollar sign literal when the script is written via the
        # double-quoted echo below; $? must be escaped too, otherwise the outer
        # shell expands it at write time and rc would always record that value.
        # Note: any `$` inside the user command itself is still expanded by the
        # outer shell when the script is written (pre-existing limitation).
        script = (
            f"#!/bin/bash\n"
            f"cd {self.process_dir}\n"
            f"date +%s > {self.process_dir}/start_time\n"
            f"{command} &\n"
            f"pid=\\$!\n"
            f"cd {self.process_dir}\n"
            f"echo \\$pid > {self.process_dir}/pid\n"
            f"wait \\$pid\n"
            f"echo \\$? > {self.process_dir}/rc\n"
            f"date +%s > {self.process_dir}/end_time\n"
        )
self.shell.exec(
f'echo "{script}" > {self.process_dir}/command.sh',
CommandOptions(extra_inspectors=self.cmd_inspectors),
)
self.shell.exec(
f"cat {self.process_dir}/command.sh",
CommandOptions(extra_inspectors=self.cmd_inspectors),
)
self.shell.exec(
f"chmod +x {self.process_dir}/command.sh",
CommandOptions(extra_inspectors=self.cmd_inspectors),
)
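
A minimal usage sketch, assuming `shell` is a connected Shell implementation; the command and paths are placeholders.

import time

proc = RemoteProcess.create("echo hello && sleep 1", shell=shell, working_dir="/tmp")
proc.start()              # writes command.sh, launches it with nohup, reads the pid
while proc.running():     # running() means the rc file has not been written yet
    print(proc.stdout())  # only the lines added since the previous call
    time.sleep(1)
print(proc.rc(), proc.start_time(), proc.end_time())
proc.clear()              # removes the proc_<uuid> directory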

View file

@ -1,28 +0,0 @@
from typing import Any
from frostfs_testlib.reporter.allure_handler import AllureHandler
from frostfs_testlib.reporter.interfaces import ReporterHandler
from frostfs_testlib.reporter.reporter import Reporter
from frostfs_testlib.reporter.steps_logger import StepsLogger
__reporter = Reporter()
def get_reporter() -> Reporter:
"""Returns reporter that the library should use for storing artifacts.
Reporter is a singleton instance that can be configured with multiple handlers that store
artifacts in various systems. Most common use case is to use single handler.
Returns:
Singleton reporter instance.
"""
return __reporter
def step(title: str):
    """Shortcut that opens a step with the given title on the singleton reporter."""
    return __reporter.step(title)
def attach(content: Any, file_name: str):
    """Shortcut that attaches content under the given file name via the singleton reporter."""
    return __reporter.attach(content, file_name)
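
A sketch of the intended call pattern; `register_handler` is an assumption about the Reporter singleton's interface and may be named differently in the actual class.

from frostfs_testlib.reporter import attach, get_reporter, step
from frostfs_testlib.reporter.allure_handler import AllureHandler

get_reporter().register_handler(AllureHandler())  # assumption: Reporter exposes register_handler

with step("Prepare test data"):
    attach("object payload", "payload.txt")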

View file

@ -1,42 +0,0 @@
import os
from contextlib import AbstractContextManager, ContextDecorator
from textwrap import shorten
from typing import Any, Callable
import allure
from allure import attachment_type
from frostfs_testlib.reporter.interfaces import ReporterHandler
class AllureHandler(ReporterHandler):
"""Handler that stores test artifacts in Allure report."""
def step(self, name: str) -> AbstractContextManager | ContextDecorator:
name = shorten(name, width=140, placeholder="...")
return allure.step(name)
def step_decorator(self, name: str) -> Callable:
return allure.step(name)
    def attach(self, body: Any, file_name: str) -> None:
        attachment_name, extension = os.path.splitext(file_name)
        if extension.startswith("."):
            extension = extension[1:]
        # Named attach_type to avoid shadowing the imported attachment_type enum.
        attach_type = self._resolve_attachment_type(extension)
        if os.path.exists(body):
            allure.attach.file(body, file_name, attach_type, extension)
        else:
            allure.attach(body, attachment_name, attach_type, extension)
def _resolve_attachment_type(self, extension: str) -> attachment_type:
"""Try to find matching Allure attachment type by extension.
If no match was found, default to TXT format.
"""
extension = extension.lower()
return next(
(allure_type for allure_type in attachment_type if allure_type.extension == extension),
attachment_type.TEXT,
)
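
An illustrative sketch of the attach fallback, assuming it runs inside an Allure-enabled test session; file names are placeholders.

handler = AllureHandler()
handler.attach('{"status": "ok"}', "result.json")  # .json resolves to attachment_type.JSON
handler.attach("free-form notes", "notes.custom")  # unknown extension falls back to TEXT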

Some files were not shown because too many files have changed in this diff.