diff --git a/.devenv.hosting.yaml b/.devenv.hosting.yaml deleted file mode 100644 index f3b8c51..0000000 --- a/.devenv.hosting.yaml +++ /dev/null @@ -1,109 +0,0 @@ -hosts: -- address: localhost - hostname: localhost - attributes: - sudo_shell: false - plugin_name: docker - healthcheck_plugin_name: basic - attributes: - skip_readiness_check: True - force_transactions: True - services: - - name: frostfs-storage_01 - attributes: - container_name: s01 - config_path: /etc/frostfs/storage/config.yml - wallet_path: ../frostfs-dev-env/services/storage/wallet01.json - local_wallet_config_path: ./TemporaryDir/empty-password.yml - local_wallet_path: ../frostfs-dev-env/services/storage/wallet01.json - wallet_password: "" - volume_name: storage_storage_s01 - endpoint_data0: s01.frostfs.devenv:8080 - control_endpoint: s01.frostfs.devenv:8081 - un_locode: "RU MOW" - - name: frostfs-storage_02 - attributes: - container_name: s02 - config_path: /etc/frostfs/storage/config.yml - wallet_path: ../frostfs-dev-env/services/storage/wallet02.json - local_wallet_config_path: ./TemporaryDir/empty-password.yml - local_wallet_path: ../frostfs-dev-env/services/storage/wallet02.json - wallet_password: "" - volume_name: storage_storage_s02 - endpoint_data0: s02.frostfs.devenv:8080 - control_endpoint: s02.frostfs.devenv:8081 - un_locode: "RU LED" - - name: frostfs-storage_03 - attributes: - container_name: s03 - config_path: /etc/frostfs/storage/config.yml - wallet_path: ../frostfs-dev-env/services/storage/wallet03.json - local_wallet_config_path: ./TemporaryDir/empty-password.yml - local_wallet_path: ../frostfs-dev-env/services/storage/wallet03.json - wallet_password: "" - volume_name: storage_storage_s03 - endpoint_data0: s03.frostfs.devenv:8080 - control_endpoint: s03.frostfs.devenv:8081 - un_locode: "SE STO" - - name: frostfs-storage_04 - attributes: - container_name: s04 - config_path: /etc/frostfs/storage/config.yml - wallet_path: ../frostfs-dev-env/services/storage/wallet04.json - local_wallet_config_path: ./TemporaryDir/empty-password.yml - local_wallet_path: ../frostfs-dev-env/services/storage/wallet04.json - wallet_password: "" - volume_name: storage_storage_s04 - endpoint_data0: s04.frostfs.devenv:8080 - control_endpoint: s04.frostfs.devenv:8081 - un_locode: "FI HEL" - - name: frostfs-s3_01 - attributes: - container_name: s3_gate - config_path: ../frostfs-dev-env/services/s3_gate/.s3.env - wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json - local_wallet_config_path: ./TemporaryDir/password-s3.yml - local_wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json - wallet_password: "s3" - endpoint_data0: https://s3.frostfs.devenv:8080 - - name: frostfs-http_01 - attributes: - container_name: http_gate - config_path: ../frostfs-dev-env/services/http_gate/.http.env - wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json - local_wallet_config_path: ./TemporaryDir/password-other.yml - local_wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json - wallet_password: "one" - endpoint_data0: http://http.frostfs.devenv - - name: frostfs-ir_01 - attributes: - container_name: ir01 - config_path: ../frostfs-dev-env/services/ir/.ir.env - wallet_path: ../frostfs-dev-env/services/ir/az.json - local_wallet_config_path: ./TemporaryDir/password-other.yml - local_wallet_path: ../frostfs-dev-env/services/ir/az.json - wallet_password: "one" - - name: neo-go_01 - attributes: - container_name: morph_chain - config_path: ../frostfs-dev-env/services/morph_chain/protocol.privnet.yml - wallet_path: 
../frostfs-dev-env/services/morph_chain/node-wallet.json - local_wallet_config_path: ./TemporaryDir/password-other.yml - local_wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json - wallet_password: "one" - endpoint_internal0: http://morph-chain.frostfs.devenv:30333 - - name: main-chain_01 - attributes: - container_name: main_chain - config_path: ../frostfs-dev-env/services/chain/protocol.privnet.yml - wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json - local_wallet_config_path: ./TemporaryDir/password-other.yml - local_wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json - wallet_password: "one" - endpoint_internal0: http://main-chain.frostfs.devenv:30333 - - name: coredns_01 - attributes: - container_name: coredns - clis: - - name: frostfs-cli - exec_path: frostfs-cli diff --git a/.forgejo/workflows/dco.yml b/.forgejo/workflows/dco.yml deleted file mode 100644 index 9aa0d31..0000000 --- a/.forgejo/workflows/dco.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: DCO action -on: [pull_request] - -jobs: - dco: - name: DCO - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Setup Go - uses: actions/setup-go@v3 - with: - go-version: '1.21' - - - name: Run commit format checker - uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3 - with: - from: 'origin/${{ github.event.pull_request.base.ref }}' diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 4691fe4..0000000 --- a/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -# ignore IDE files -.vscode -.idea -venv.* - -# ignore temp files under any path -.DS_Store -**/__pycache__ - -# ignore build artifacts -/dist -/build -*.egg-info -wallet_config.yml \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index 6a9716a..0000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,11 +0,0 @@ -repos: - - repo: https://github.com/psf/black - rev: 22.8.0 - hooks: - - id: black - language_version: python3.10 - - repo: https://github.com/pycqa/isort - rev: 5.12.0 - hooks: - - id: isort - name: isort (python) diff --git a/CODEOWNERS b/CODEOWNERS deleted file mode 100644 index 519ca42..0000000 --- a/CODEOWNERS +++ /dev/null @@ -1,3 +0,0 @@ -.* @TrueCloudLab/qa-committers -.forgejo/.* @potyarkin -Makefile @potyarkin diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 69417d2..0000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,225 +0,0 @@ -# Contribution guide - -First, thank you for contributing! We love and encourage pull requests from -everyone. Please follow the guidelines: - -- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/issues) and - [pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/pulls) for existing - discussions. - -- Open an issue first, to discuss a new feature or enhancement. - -- Write tests, and make sure the test suite passes locally. - -- Open a pull request, and reference the relevant issue(s). - -- Make sure your commits are logically separated and have good comments - explaining the details of your change. - -- After receiving feedback, amend your commits or add new ones as appropriate. - -- **Have fun!** - -## Development Workflow - -Start by forking the `frostfs-testlib` repository, make changes in a branch and then -send a pull request. We encourage pull requests to discuss code changes. 
Here -are the steps in detail: - -### Set up your Git Repository -Fork [FrostFS testlib upstream](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/forks) source -repository to your own personal repository. Copy the URL of your fork and clone it: - -```shell -$ git clone -``` - -### Set up git remote as ``upstream`` -```shell -$ cd frostfs-testlib -$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-testlib -$ git fetch upstream -``` - -### Set up development environment -To set up the development environment for `frostfs-testlib`, take the following steps: - -```shell -$ make venv -$ source venv.frostfs-testlib/bin/activate -``` - -Optionally, you may want to integrate code formatters with your code editor so formatting is applied to code files as you go: -* isort is supported by [PyCharm](https://plugins.jetbrains.com/plugin/15434-isortconnect), [VS Code](https://cereblanco.medium.com/setup-black-and-isort-in-vscode-514804590bf9). Plugins exist for other IDEs/editors as well. -* black can be integrated with multiple editors; instructions are available [here](https://black.readthedocs.io/en/stable/integrations/editors.html). - -### Create your feature branch -Before making code changes, make sure you create a separate branch for these -changes. You may find it convenient to name the branch in -`/-` format. - -```shell -$ git checkout -b feature/123-something_awesome -``` - -### Test your changes -Before submitting any changes to the library, make sure that the linter and all unit tests pass. To run them, use the following command: -```shell -$ make validation -``` - -To enable tests that interact with an SSH server, set up an SSH server and set the following environment variables before running the tests: -``` -SSH_SHELL_HOST =
-SSH_SHELL_LOGIN = -SSH_SHELL_PRIVATE_KEY_PATH = -SSH_SHELL_PRIVATE_KEY_PASSPHRASE = -``` - -### Commit changes -After verification, commit your changes. There is a [great -post](https://chris.beams.io/posts/git-commit/) on how to write useful commit -messages. Try following this template: - -``` -[#Issue] Summary -Description - - -``` - -```shell -$ git commit -am '[#123] Add some feature' -``` - -### Push to the branch -Push your locally committed changes to the remote origin (your fork): -```shell -$ git push origin feature/123-something_awesome -``` - -### Create a Pull Request -Pull requests can be created via Git. Refer to [this -document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for -detailed steps on how to create a pull request. After a Pull Request gets peer -reviewed and approved, it will be merged. - -## DCO Sign off - -All authors to the project retain copyright to their work. However, to ensure -that they are only submitting work that they have rights to, we are requiring -everyone to acknowledge this by signing their work. - -Any copyright notices in this repository should specify the authors as "the -contributors". - -To sign your work, just add a line like this at the end of your commit message: - -``` -Signed-off-by: Andrey Berezin -``` - -This can easily be done with the `--signoff` option to `git commit`. - -By doing this you state that you can certify the following (from [The Developer -Certificate of Origin](https://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -1 Letterman Drive -Suite D4700 -San Francisco, CA, 94129 -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. -Developer's Certificate of Origin 1.1 -By making a contribution to this project, I certify that: -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -## Code Style -We use `black` and `isort` for code formatting. Please, refer to [Black code style](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html) for details. - -Type hints are mandatory for library's code: - - class attributes; - - function or method's parameters; - - function or method's return type. - -The only exception is return type of test functions or methods - there's no much use in specifying `None` as return type for each test function. - -Do not use relative imports. Even if the module is in the same package, use the full package name. 
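For illustration, here is a minimal sketch that follows the conventions above: type hints on class attributes, parameters and return values, and full-path rather than relative imports. All names in this snippet are hypothetical and are not part of the library:

```python
import subprocess
import time
from dataclasses import dataclass

# In library code, import helpers by their full package name
# (e.g. `from frostfs_testlib.some.module import helper`), never relatively.


@dataclass
class RetryPolicy:
    """Retry parameters for a shell command (hypothetical example)."""

    attempts: int = 3  # class attributes carry type hints
    delay_seconds: float = 1.0


def run_with_retries(command: str, policy: RetryPolicy) -> str:
    """Run a shell command with retries and return its stdout.

    Args:
        command: Shell command to execute.
        policy: Retry parameters to apply.

    Returns:
        Captured standard output of the first successful attempt.
    """
    last_error: subprocess.CalledProcessError | None = None
    for _ in range(policy.attempts):
        try:
            result = subprocess.run(command, shell=True, check=True, capture_output=True, text=True)
            return result.stdout
        except subprocess.CalledProcessError as error:
            last_error = error
            time.sleep(policy.delay_seconds)
    raise last_error
```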
- -To format docstrings, please, use [Google Style Docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html). Type annotations should be specified in the code and not in docstrings (please, refer to [this sample](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/index.html#type-annotations)). - -## Editable installation -If you would like to modify code of the library in the integration with your test suite, you can use editable installation. For that, in virtual environment of your test suite (not in the virtual environment of the testlib itself!) run the following command (path to `frostfs-testlib` directory might be different on your machine): -```shell -$ pip install -e ../frostfs-testlib -``` - -# Maintaining guide - -## Versioning -We follow [Semantic Versioning Specification](https://semver.org/) to version this library. To manage version number in the source code, we use [bumpver](https://pypi.org/project/bumpver/) package. - -To update a version of the library, please, take the following steps: -1. Make sure that your have no pending changes in git. -2. Run the following command to update version and commit it to git: - ```shell - $ bumpver update --major # to update major version - $ bumpver update --minor # to update minor version - $ bumpver update --patch # to update the patch component of the version - ``` -3. Sign-off the created commit: - ```shell - $ git commit --amend --signoff - ``` -4. Push the changes to remote. -5. After this commit is merged to upstream, create a tag on the master branch of upstream. Tag name should be formatted as "v{new_version}": - ```shell - $ git tag v - $ git push upstream v - ``` - -## Building and publishing package -To build Python package of the library, please run the following command in the library root directory: -```shell -$ python -m build -``` - -This command will put wheel file and source archive under `dist` directory. - -To check that package description will be correctly rendered at PyPI, please, use command: -```shell -$ twine check dist/* -``` - -To upload package to [test PyPI](https://test.pypi.org/project/frostfs-testlib/), please, use command: -```shell -$ twine upload -r testpypi dist/* -``` -It will prompt for your username and password. You would need to [create test PyPI account](https://test.pypi.org/account/register/) in order to execute it. - -To upload package to actual PyPI, please, use command: -```shell -$ twine upload dist/* -``` -It will prompt for your username and password. You would need to [create PyPI account](https://pypi.org/account/register/) in order to execute it. diff --git a/LICENSE b/LICENSE deleted file mode 100644 index f288702..0000000 --- a/LICENSE +++ /dev/null @@ -1,674 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. 
We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. 
The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. 
- - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. 
This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. 
- - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. 
If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). 
- - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". 
- - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. 
- - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. 
- - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. 
diff --git a/Makefile b/Makefile deleted file mode 100644 index 644eab0..0000000 --- a/Makefile +++ /dev/null @@ -1,50 +0,0 @@ -SHELL := /bin/bash -PYTHON_VERSION := 3.10 -VENV_NAME := frostfs-testlib -VENV_DIR := venv.${VENV_NAME} - -current_dir := $(shell pwd) -DIRECTORIES := $(sort $(dir $(wildcard ../frostfs-testlib-plugin-*/ ../*-testcases/))) -FROM_VENV := . ${VENV_DIR}/bin/activate && - -venv: create requirements paths precommit - @echo Ready - -precommit: - @echo Isntalling pre-commit hooks - . ${VENV_DIR}/bin/activate && pre-commit install - -paths: - @echo Append paths for project - @echo Virtual environment: ${current_dir}/${VENV_DIR} - @rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth - @touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth - @echo ${current_dir}/src | tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth - -create: ${VENV_DIR} - -${VENV_DIR}: - @echo Create virtual environment ${current_dir}/${VENV_DIR} - virtualenv --python=python${PYTHON_VERSION} --prompt=${VENV_NAME} ${VENV_DIR} - -requirements: - @echo Isntalling pip requirements - . ${VENV_DIR}/bin/activate && pip install -Ur requirements.txt - - -#### VALIDATION SECTION #### -lint: create requirements - ${FROM_VENV} pylint --disable R,C,W ./src - -unit_test: - @echo Starting unit tests - ${FROM_VENV} python -m pytest tests - -.PHONY: lint_dependent $(DIRECTORIES) -lint_dependent: $(DIRECTORIES) - -$(DIRECTORIES): - @echo checking dependent repo $@ - $(MAKE) validation -C $@ - -validation: lint unit_test lint_dependent \ No newline at end of file diff --git a/README.md b/README.md index 2f8751f..7463f9e 100644 --- a/README.md +++ b/README.md @@ -1,95 +1,3 @@ -# frostfs-testlib -This library provides building blocks and utilities to facilitate development of automated tests for FrostFS system. +# WIP area: this repo is just a fork! -## Installation -Library can be installed via pip: -```shell -$ pip install frostfs-testlib -``` - -## Configuration -Some library components support configuration that allows dynamic loading of extensions via plugins. Configuration of such components is described in this section. - -### Reporter Configuration -Reporter is a singleton component that is used by the library to store test artifacts. - -Reporter sends artifacts to handlers that are responsible for actual storing in particular system. By default reporter is initialized without any handlers and won't take any actions to store the artifacts. To add handlers directly via code you can use method `register_handler`: - -```python -from frostfs_testlib.reporter import AllureHandler, get_reporter - -get_reporter().register_handler(AllureHandler()) -``` - -This registration should happen early at the test session, because any artifacts produced before handler is registered won't be stored anywhere. - -Alternative approach for registering handlers is to use method `configure`. It is similar to method [dictConfig](https://docs.python.org/3/library/logging.config.html#logging.config.dictConfig) in a sense that it receives a config structure that describes handlers that should be registered in the reporter. Each handler is defined by it's plugin name; for example, to register the built-in Allure handler, we can use the following config: - -```python -get_reporter().configure({ "handlers": [{"plugin_name": "allure"}] }) -``` - -### Hosting Configuration -Hosting component is a class that represents infrastructure (machines/containers/services) where frostFS is hosted. 
Interaction with specific infrastructure instance (host) is encapsulated in classes that implement interface `frostfs_testlib.hosting.Host`. To pass information about hosts to the `Hosting` class in runtime we use method `configure`: - -```python -from frostfs_testlib.hosting import Hosting - -hosting = Hosting() -hosting.configure({ "hosts": [{ "address": "localhost", "plugin_name": "docker" ... }]}) -``` - -## Plugins -Testlib uses [entrypoint specification](https://docs.python.org/3/library/importlib.metadata.html) for plugins. Testlib supports the following entrypoint groups for plugins: - - `frostfs.testlib.reporter` - group for reporter handler plugins. Plugin should be a class that implements interface `frostfs_testlib.reporter.interfaces.ReporterHandler`. - -### Example reporter plugin -In this example we will consider two Python projects: - - Project "my_frostfs_plugins" where we will build a plugin that extends testlib functionality. - - Project "my_frostfs_tests" that uses "frostfs_testlib" and "my_frostfs_plugins" to build some tests. - -Let's say we want to implement some custom reporter handler that can be used as a plugin for testlib. Pseudo-code of implementation can look like that: -```python -# File my_frostfs_plugins/src/foo/bar/custom_handler.py -from contextlib import AbstractContextManager -from frostfs_testlib.reporter import ReporterHandler - - -class CustomHandler(ReporterHandler): - def step(self, name: str) -> AbstractContextManager: - ... some implementation ... - - def attach(self, content: Any, file_name: str) -> None: - ... some implementation ... -``` - -Then in the file `pyproject.toml` of "my_frostfs_plugins" we should register entrypoint for this plugin. Entrypoint must belong to the group `frostfs.testlib.reporter`: -```yaml -# File my_frostfs_plugins/pyproject.toml -[project.entry-points."frostfs.testlib.reporter"] -my_custom_handler = "foo.bar.custom_handler:CustomHandler" -``` - -Finally, to use this handler in our test project "my_frostfs_tests", we should configure reporter with name of the handler plugin: - -```python -# File my_frostfs_tests/src/conftest.py -from frostfs_testlib.reporter import get_reporter - -get_reporter().configure({ "handlers": [{"plugin_name": "my_custom_handler"}] }) -``` - -Detailed information about registering entrypoints can be found at [setuptools docs](https://setuptools.pypa.io/en/latest/userguide/entry_point.html). - -## Library structure -The library provides the following primary components: - * `blockchain` - Contains helpers that allow to interact with neo blockchain, smart contracts, gas transfers, etc. - * `cli` - wrappers on top of frostFS command-line tools. These wrappers execute on a shell and provide type-safe interface for interacting with the tools. - * `hosting` - management of infrastructure (docker, virtual machines, services where frostFS is hosted). The library provides host implementation for docker environment (when frostFS services are running as docker containers). Support for other hosts is provided via plugins. - * `reporter` - abstraction on top of test reporting tool like Allure. Components of the library will report their steps and attach artifacts to the configured reporter instance. - * `shell` - shells that can be used to execute commands. Currently library provides local shell (on machine that runs the code) or SSH shell that connects to a remote machine via SSH. - * `utils` - Support functions. 
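As a quick illustration of the `shell` component listed above, a test could execute a command on the local machine roughly as follows. This is only a sketch: the exact class and method names (`LocalShell`, `exec`, the fields of the returned result) are assumptions and should be verified against the `frostfs_testlib.shell` module:

```python
from frostfs_testlib.shell import LocalShell  # assumed import path

# Run a command on the machine that executes the tests.
shell = LocalShell()
result = shell.exec("frostfs-cli --version")  # assumed method name and signature

# The result object is expected to expose the captured output of the command.
print(result.stdout)
```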
- - -## Contributing -Any contributions to the library should conform to the [contribution guideline](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/src/branch/master/CONTRIBUTING.md). +Useful things may be published only in [other branches](../../../branches) diff --git a/pyproject.toml b/pyproject.toml deleted file mode 100644 index d62f04b..0000000 --- a/pyproject.toml +++ /dev/null @@ -1,95 +0,0 @@ -[build-system] -requires = ["setuptools>=65.0.0", "wheel"] -build-backend = "setuptools.build_meta" - -[project] -name = "frostfs-testlib" -version = "2.0.1" -description = "Building blocks and utilities to facilitate development of automated tests for FrostFS system" -readme = "README.md" -authors = [{ name = "Yadro", email = "info@yadro.com" }] -license = { text = "GNU General Public License v3 (GPLv3)" } -classifiers = [ - "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", - "Programming Language :: Python", - "Programming Language :: Python :: 3", -] -keywords = ["frostfs", "test"] -dependencies = [ - "allure-python-commons>=2.13.2", - "docker>=4.4.0", - "pyyaml==6.0.1", - "neo-mamba==1.0.0", - "paramiko>=2.10.3", - "pexpect>=4.8.0", - "requests==2.28.1", - "docstring_parser>=0.15", - "testrail-api>=1.12.0", - "pytest==7.1.2", - "tenacity==8.0.1", - "boto3==1.35.30", - "boto3-stubs[s3,iam,sts]==1.35.30", -] -requires-python = ">=3.10" - -[project.optional-dependencies] -dev = ["black", "bumpver", "isort", "pre-commit"] - -[project.urls] -Homepage = "https://git.frostfs.info/TrueCloudLab/frostfs-testlib" - -[project.entry-points."frostfs.testlib.reporter"] -allure = "frostfs_testlib.reporter.allure_handler:AllureHandler" - -[project.entry-points."frostfs.testlib.hosting"] -docker = "frostfs_testlib.hosting.docker_host:DockerHost" - -[project.entry-points."frostfs.testlib.healthcheck"] -basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck" - -[project.entry-points."frostfs.testlib.csc_managers"] -config = "frostfs_testlib.storage.controllers.state_managers.config_state_manager:ConfigStateManager" - -[project.entry-points."frostfs.testlib.services"] -frostfs-storage = "frostfs_testlib.storage.dataclasses.frostfs_services:StorageNode" -frostfs-s3 = "frostfs_testlib.storage.dataclasses.frostfs_services:S3Gate" -frostfs-http = "frostfs_testlib.storage.dataclasses.frostfs_services:HTTPGate" -neo-go = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain" -frostfs-ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing" - -[project.entry-points."frostfs.testlib.credentials_providers"] -authmate = "frostfs_testlib.credentials.authmate_s3_provider:AuthmateS3CredentialsProvider" -wallet_factory = "frostfs_testlib.credentials.wallet_factory_provider:WalletFactoryProvider" - -[project.entry-points."frostfs.testlib.bucket_cid_resolver"] -frostfs = "frostfs_testlib.clients.s3.curl_bucket_resolver:CurlBucketContainerResolver" - -[tool.isort] -profile = "black" -src_paths = ["src", "tests"] -line_length = 140 - -[tool.black] -line-length = 140 -target-version = ["py310"] - -[tool.bumpver] -current_version = "2.0.1" -version_pattern = "MAJOR.MINOR.PATCH" -commit_message = "Bump version {old_version} -> {new_version}" -commit = true -tag = false -push = false - -[tool.bumpver.file_patterns] -"pyproject.toml" = ['current_version = "{version}"', 'version = "{version}"'] -"src/frostfs_testlib/__init__.py" = ["{version}"] - -[tool.pytest.ini_options] -filterwarnings = [ - "ignore:Blowfish has been 
deprecated:cryptography.utils.CryptographyDeprecationWarning", -] -testpaths = ["tests"] - -[project.entry-points.pytest11] -testlib = "frostfs_testlib" \ No newline at end of file diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 56d9b83..0000000 --- a/requirements.txt +++ /dev/null @@ -1,25 +0,0 @@ -allure-python-commons==2.13.2 -docker==4.4.0 -neo-mamba==1.0.0 -paramiko==2.10.3 -pexpect==4.8.0 -requests==2.28.1 -docstring_parser==0.15 -testrail-api==1.12.0 -tenacity==8.0.1 -pytest==7.1.2 -boto3==1.35.30 -boto3-stubs[s3,iam,sts]==1.35.30 -pydantic==2.10.6 - -# Dev dependencies -black==22.8.0 -bumpver==2022.1118 -isort==5.12.0 -pre-commit==2.20.0 -pylint==2.17.4 - -# Packaging dependencies -build==0.8.0 -setuptools==65.3.0 -twine==4.0.1 \ No newline at end of file diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py deleted file mode 100644 index 4724a8b..0000000 --- a/src/frostfs_testlib/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -__version__ = "2.0.1" - -from .fixtures import configure_testlib, hosting, session_start_time, temp_directory -from .hooks import pytest_add_frostfs_marker, pytest_collection_modifyitems diff --git a/src/frostfs_testlib/analytics/__init__.py b/src/frostfs_testlib/analytics/__init__.py deleted file mode 100644 index b057418..0000000 --- a/src/frostfs_testlib/analytics/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from frostfs_testlib.analytics import test_case -from frostfs_testlib.analytics.test_case import TestCasePriority -from frostfs_testlib.analytics.test_collector import TestCase, TestCaseCollector -from frostfs_testlib.analytics.test_exporter import TСExporter -from frostfs_testlib.analytics.testrail_exporter import TestrailExporter diff --git a/src/frostfs_testlib/analytics/test_case.py b/src/frostfs_testlib/analytics/test_case.py deleted file mode 100644 index c6e7ff5..0000000 --- a/src/frostfs_testlib/analytics/test_case.py +++ /dev/null @@ -1,82 +0,0 @@ -import allure - -from enum import Enum -from types import FunctionType - -class TestCasePriority(Enum): - HIGHEST = 0 - HIGH = 1 - MEDIUM = 2 - LOW = 3 - -def __set_label__(name: str, value: str, allure_decorator: FunctionType = None): - """ - Generic function for do not duplicate set label code in each decorator. - We get decorated function as an object and set needed argument inside. - - Args: - name: argument name to set into the function object - value: argument value to set into the function object - allure_decorator: allure decorator to decorate function and do not duplicate decorators with same value - """ - def wrapper(decorated_func): - if allure_decorator: - decorated_func = allure_decorator(value)(decorated_func) - setattr(decorated_func, name, value) - return decorated_func - - return wrapper - - -def id(uuid: str): - """ - Decorator for set test case ID which can be used as unique value due export into TMS. - - We prefer to use UUID4 format string for ID. - ID have to be generated manually for each new test. - - Args: - uuid: id to set as test_case_id into test function - """ - return __set_label__("__test_case_id__", uuid) - - -def title(title: str): - """ - Decorator for set test case title / name / summary / short description what we do. - - Args: - title: string with title to set into test function - """ - - return __set_label__("__test_case_title__", title, allure.title) - -def priority(priority: str): - """ - Decorator for set test case title / name / summary / short description what we do. 
- - Args: - priority: string with priority to set into test function - """ - - return __set_label__("__test_case_priority__", priority) - - -def suite_name(name: str): - """ - Decorator for set test case suite name. - Suite name is usually using in TMS for create structure of test cases. - - Args: - name: string with test suite name for set into test function - """ - - return __set_label__("__test_case_suite_name__", name, allure.story) - - -def suite_section(name: str): - """ - Decorator for set test case suite section. - Suite section is usually using in TMS for create deep test cases structure. - """ - return __set_label__("__test_case_suite_section__", name) diff --git a/src/frostfs_testlib/analytics/test_collector.py b/src/frostfs_testlib/analytics/test_collector.py deleted file mode 100644 index 56ee606..0000000 --- a/src/frostfs_testlib/analytics/test_collector.py +++ /dev/null @@ -1,199 +0,0 @@ -import re - -from docstring_parser import parse -from docstring_parser.common import DocstringStyle -from docstring_parser.google import DEFAULT_SECTIONS, Section, SectionType - -DEFAULT_SECTIONS.append(Section("Steps", "steps", SectionType.MULTIPLE)) - - -class TestCase: - """ - Test case object implementation for use in collector and exporters - """ - - def __init__( - self, - uuid_id: str, - title: str, - description: str, - priority: int, - steps: dict, - params: str, - suite_name: str, - suite_section_name: str, - ): - """ - Base constructor for TestCase object - - Args: - uuid_id: uuid from id decorator - title: test case title from title decorator - priority: test case priority value (0-3) - steps: list of test case steps read from function __doc__ - params: string with test case param read from pytest Function(test) object - suite_name: test case suite name from test_suite decorator - suite_section_name: test case suite section from test_suite_section decorator - """ - - # It can confuse, but we rewrite id to "id [params]" string - # We do it in case that one functions can return a lot of tests if we use test params - if params: - self.id = f"{uuid_id} [{params}]" - else: - self.id: str = uuid_id - self.title: str = title - self.description: str = description - self.priority: int = priority - self.steps: dict = steps - self.params: str = params - self.suite_name: str = suite_name - self.suite_section_name: str = suite_section_name - - -class TestCaseCollector: - """ - Collector working like a plugin for pytest and can be used in collect-only call to get tests list from pytest - Additionally, we have several function to filter tests that can be exported. - """ - - pytest_tests = [] - - def __format_string_with_params__(self, source_string: str, test_params: dict) -> str: - """ - Helper function for format test case string arguments using test params. - Params name can be deep like a.b.c, so we will get the value from tests params. - Additionally, we check is the next object dict or real object to use right call for get next argument. 
- - Args: - source_string: string for format by using test params (if needed) - test_params: dictionary with test params got from pytest test object - Returns: - (str): formatted string with replaced params name by params value - """ - - target_string: str = source_string - for match in re.findall(r"\{(.*?)}", source_string): - nestings_attrs = match.split(".") - param = None - for nesting_attr in nestings_attrs: - if not param: - param = test_params.get(nesting_attr) - else: - if isinstance(param, dict): - param = param.get(nesting_attr) - else: - param = getattr(param, nesting_attr) - target_string = target_string.replace(f"{{{match}}}", str(param)) - return target_string - - def __get_test_case_from_pytest_test__(self, test) -> TestCase: - """ - Parce test meta and return test case if there is enough information for that. - - Args: - test: pytest Function object - Returns: - (TestCase): return tests cases if there is enough information for that and None if not - """ - - # Default values for use behind - suite_name: str = None - suite_section_name: str = None - test_case_steps = dict() - test_case_params: str = "" - test_case_description: str = "" - - # Read test_case suite and section name from test class if possible and get test function from class - if test.cls: - suite_name = test.cls.__dict__.get("__test_case_suite_name__", suite_name) - suite_section_name = test.cls.__dict__.get( - "__test_case_suite_section__", suite_section_name - ) - test_function = test.cls.__dict__[test.originalname] - else: - # If no test class, read test function from module - test_function = test.module.__dict__[test.originalname] - - # Read base values from test function arguments - test_case_id = test_function.__dict__.get("__test_case_id__", None) - test_case_title = test_function.__dict__.get("__test_case_title__", None) - test_case_priority = test_function.__dict__.get("__test_case_priority__", None) - suite_name = test_function.__dict__.get("__test_case_suite_name__", suite_name) - suite_section_name = test_function.__dict__.get( - "__test_case_suite_section__", suite_section_name - ) - - # Parce test_steps if they define in __doc__ - doc_string = parse(test_function.__doc__, style=DocstringStyle.GOOGLE) - - if doc_string.short_description: - test_case_description = doc_string.short_description - if doc_string.long_description: - test_case_description = ( - f"{doc_string.short_description}\r\n{doc_string.long_description}" - ) - - if doc_string.meta: - for meta in doc_string.meta: - if meta.args[0] == "steps": - test_case_steps[meta.args[1]] = meta.description - - # Read params from tests function if its exist - test_case_call_spec = getattr(test, "callspec", "") - - if test_case_call_spec: - # Set test cases params string value - test_case_params = test_case_call_spec.id - # Format title with params - if test_case_title: - test_case_title = self.__format_string_with_params__( - test_case_title, test_case_call_spec.params - ) - # Format steps with params - if test_case_steps: - for key, value in test_case_steps.items(): - value = self.__format_string_with_params__(value, test_case_call_spec.params) - test_case_steps[key] = value - - # If there is set basic test case attributes create TestCase and return - if test_case_id and test_case_title and suite_name and suite_name: - test_case = TestCase( - uuid_id=test_case_id, - title=test_case_title, - description=test_case_description, - priority=test_case_priority, - steps=test_case_steps, - params=test_case_params, - suite_name=suite_name, - 
suite_section_name=suite_section_name, - ) - return test_case - # Return None if there is no enough information for return test case - return None - - def pytest_report_collectionfinish(self, pytest_tests: list) -> None: - """ - !!! DO NOT CHANGE THE NANE IT IS NOT A MISTAKE - Implement specific function with specific name - Pytest will be call this function when he uses plugin in collect-only call - - Args: - pytest_tests: list of pytest tests - """ - self.pytest_tests.extend(pytest_tests) - - def collect_test_cases(self) -> list[TestCase]: - """ - We're collecting test cases from the pytest tests list and return them in test case representation. - - Returns: - (list[TestCase]): list of test cases that we found in the pytest tests code - """ - test_cases = [] - - for test in self.pytest_tests: - test_case = self.__get_test_case_from_pytest_test__(test) - if test_case: - test_cases.append(test_case) - return test_cases diff --git a/src/frostfs_testlib/analytics/test_exporter.py b/src/frostfs_testlib/analytics/test_exporter.py deleted file mode 100644 index dd6a7fb..0000000 --- a/src/frostfs_testlib/analytics/test_exporter.py +++ /dev/null @@ -1,69 +0,0 @@ -from abc import ABC, abstractmethod - -from frostfs_testlib.analytics.test_collector import TestCase - - -# TODO: REMOVE ME -class TСExporter(ABC): - test_cases_cache = [] - test_suites_cache = [] - - @abstractmethod - def fill_suite_cache(self) -> None: - """ - Fill test_suite_cache by all tests cases in TMS - It's help do not call TMS each time then we search test suite - """ - - @abstractmethod - def fill_cases_cache(self) -> None: - """ - Fill test_cases_cache by all tests cases in TMS - It's help do not call TMS each time then we search test case - """ - - @abstractmethod - def search_test_case_id(self, test_case_id: str) -> object: - """ - Find test cases in TMS by ID - """ - - @abstractmethod - def get_or_create_test_suite(self, test_suite_name: str) -> object: - """ - Get suite name with exact name or create if not exist - """ - - @abstractmethod - def get_or_create_suite_section(self, test_rail_suite, section_name: str) -> object: - """ - Get suite section with exact name or create new one if not exist - """ - - @abstractmethod - def create_test_case(self, test_case: TestCase, test_suite, test_suite_section) -> None: - """ - Create test case in TMS - """ - - @abstractmethod - def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None: - """ - Update test case in TMS - """ - - def export_test_cases(self, test_cases: list[TestCase]): - # Fill caches before starting imports - self.fill_suite_cache() - self.fill_cases_cache() - - for test_case in test_cases: - test_suite = self.get_or_create_test_suite(test_case.suite_name) - test_section = self.get_or_create_suite_section(test_suite, test_case.suite_section_name) - test_case_in_tms = self.search_test_case_id(test_case.id) - steps = [{"content": value, "expected": " "} for key, value in test_case.steps.items()] - - if test_case_in_tms: - self.update_test_case(test_case, test_case_in_tms, test_suite, test_section) - else: - self.create_test_case(test_case, test_suite, test_section) diff --git a/src/frostfs_testlib/analytics/testrail_exporter.py b/src/frostfs_testlib/analytics/testrail_exporter.py deleted file mode 100644 index 36c482c..0000000 --- a/src/frostfs_testlib/analytics/testrail_exporter.py +++ /dev/null @@ -1,159 +0,0 @@ -from testrail_api import TestRailAPI - -from frostfs_testlib.analytics.test_collector import TestCase -from 
frostfs_testlib.analytics.test_exporter import TСExporter - - -class TestrailExporter(TСExporter): - def __init__( - self, - tr_url: str, - tr_username: str, - tr_password: str, - tr_project_id: int, - tr_template_id_without_steps: int, - tr_template_id_with_steps: int, - tr_priority_map: dict, - tr_id_field: str, - tr_description_fields: str, - tr_steps_field: str, - ): - """ - Redefine init for base exporter for get test rail credentials and project on create exporter - - Args: - tr_url: api url for create TestRailAPI object. See lib docs for details - tr_username: Testrail user login for api authentication - tr_password: Testrail user password for api authentication - tr_template_id_with_steps: id of test case template with steps - tr_template_id_without_steps: id of test case template without steps - tr_priority_map: mapping of TestCasePriority to priority ids in Testrail - """ - - self.api: TestRailAPI = TestRailAPI(tr_url, tr_username, tr_password) - self.tr_project_id: int = tr_project_id - self.tr_template_id_without_steps = tr_template_id_without_steps - self.tr_template_id_with_steps = tr_template_id_with_steps - self.tr_priority_map = tr_priority_map - self.tr_id_field = tr_id_field - self.tr_description_fields = tr_description_fields - self.tr_steps_field = tr_steps_field - self.test_case_id_field_name = "" # TODO: Add me - - def fill_suite_cache(self) -> None: - """ - Fill test_suite_cache by all tests cases in TestRail - It's help do not call TMS each time then we search test suite - """ - project_suites = self.api.suites.get_suites(project_id=self.tr_project_id) - - for test_suite in project_suites: - test_suite_sections = self.api.sections.get_sections( - project_id=self.tr_project_id, - suite_id=test_suite["id"], - ) - test_suite["sections"] = test_suite_sections - - self.test_suites_cache.append(test_suite) - - def fill_cases_cache(self) -> None: - """ - Fill test_cases_cache by all tests cases in TestRail - It's help do not call TMS each time then we search test case - """ - for test_suite in self.test_suites_cache: - self.test_cases_cache.extend(self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"])) - - def search_test_case_id(self, test_case_id: str) -> object: - """ - Find test cases in TestRail (cache) by ID - """ - test_cases = [test_case for test_case in self.test_cases_cache if test_case["custom_autotest_name"] == test_case_id] - - if len(test_cases) > 1: - raise RuntimeError(f"Too many results found in test rail for id {test_case_id}") - elif len(test_cases) == 1: - return test_cases.pop() - else: - return None - - def get_or_create_test_suite(self, test_suite_name) -> object: - """ - Get suite name with exact name from Testrail or create if not exist - """ - test_rail_suites = [suite for suite in self.test_suites_cache if suite["name"] == test_suite_name] - - if not test_rail_suites: - test_rail_suite = self.api.suites.add_suite( - project_id=self.tr_project_id, - name=test_suite_name, - ) - test_rail_suite["sections"] = list() - self.test_suites_cache.append(test_rail_suite) - return test_rail_suite - elif len(test_rail_suites) == 1: - return test_rail_suites.pop() - else: - raise RuntimeError(f"Too many results found in test rail for suite name {test_suite_name}") - - def get_or_create_suite_section(self, test_rail_suite, section_name) -> object: - """ - Get suite section with exact name from Testrail or create new one if not exist - """ - test_rail_sections = [section for section in test_rail_suite["sections"] if section["name"] == 
section_name] - - if not test_rail_sections: - test_rail_section = self.api.sections.add_section( - project_id=self.tr_project_id, - suite_id=test_rail_suite["id"], - name=section_name, - ) - # !!!!!! BAD !!!!!! Do we really change object from cache or copy of suite object???? - # !!!!!! WE have to update object in cache - # !!!!! In opposite we will try to create section twice and get error from API - test_rail_suite["sections"].append(test_rail_section) - return test_rail_section - elif len(test_rail_sections) == 1: - return test_rail_sections.pop() - else: - raise RuntimeError(f"Too many results found in test rail for section name {section_name}") - - def prepare_request_body(self, test_case: TestCase, test_suite, test_suite_section) -> dict: - """ - Helper to prepare request body for add or update tests case from TestCase object - """ - request_body = { - "title": test_case.title, - "section_id": test_suite_section["id"], - self.test_case_id_field_name: test_case.id, - } - - if test_case.priority: - request_body["priority_id"] = self.tr_priority_map.get(test_case.priority) - - if test_case.steps: - steps = [{"content": value, "expected": " "} for key, value in test_case.steps.items()] - request_body[self.tr_steps_field] = steps - request_body["template_id"] = self.tr_template_id_with_steps - else: - request_body["template_id"] = self.tr_template_id_without_steps - if test_case.description: - request_body[self.tr_description_fields] = self.tr_description_fields - - return request_body - - def create_test_case(self, test_case: TestCase, test_suite, test_suite_section) -> None: - """ - Create test case in Testrail - """ - request_body = self.prepare_request_body(test_case, test_suite, test_suite_section) - - self.api.cases.add_case(**request_body) - - def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None: - """ - Update test case in Testrail - """ - request_body = self.prepare_request_body(test_case, test_suite, test_suite_section) - - self.api.cases.update_case(case_id=test_case_in_tms["id"], **request_body) diff --git a/src/frostfs_testlib/blockchain/__init__.py b/src/frostfs_testlib/blockchain/__init__.py deleted file mode 100644 index ceca6a2..0000000 --- a/src/frostfs_testlib/blockchain/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from frostfs_testlib.blockchain.multisig import Multisig -from frostfs_testlib.blockchain.rpc_client import RPCClient diff --git a/src/frostfs_testlib/blockchain/multisig.py b/src/frostfs_testlib/blockchain/multisig.py deleted file mode 100644 index cd71f68..0000000 --- a/src/frostfs_testlib/blockchain/multisig.py +++ /dev/null @@ -1,51 +0,0 @@ -from frostfs_testlib.cli import NeoGo - - -class Multisig: - def __init__(self, neogo: NeoGo, invoke_tx_file: str, block_period: int): - self.neogo = neogo - self.invoke_tx_file = invoke_tx_file - self.block_period = block_period - - def create_and_send( - self, - contract_hash: str, - contract_args: str, - multisig_hash: str, - wallets: list[str], - passwords: list[str], - address: str, - endpoint: str, - ) -> None: - if not len(wallets): - raise AttributeError("Got empty wallets list") - - self.neogo.contract.invokefunction( - address=address, - rpc_endpoint=endpoint, - wallet=wallets[0], - wallet_password=passwords[0], - out=None if len(wallets) == 1 else self.invoke_tx_file, - scripthash=contract_hash, - arguments=contract_args, - multisig_hash=multisig_hash, - ) - - if len(wallets) > 1: - # sign with rest of wallets except the last one - for wallet in 
wallets[1:-1]: - self.neogo.wallet.sign( - wallet=wallet, - input_file=self.invoke_tx_file, - out=self.invoke_tx_file, - address=address, - ) - - # sign tx with last wallet and push it to blockchain - self.neogo.wallet.sign( - wallet=wallets[-1], - input_file=self.invoke_tx_file, - out=self.invoke_tx_file, - address=address, - rpc_endpoint=endpoint, - ) diff --git a/src/frostfs_testlib/blockchain/role_designation.py b/src/frostfs_testlib/blockchain/role_designation.py deleted file mode 100644 index 4535300..0000000 --- a/src/frostfs_testlib/blockchain/role_designation.py +++ /dev/null @@ -1,155 +0,0 @@ -import json -from time import sleep -from typing import Optional - -from frostfs_testlib.blockchain import Multisig -from frostfs_testlib.cli import NeoGo -from frostfs_testlib.shell import Shell -from frostfs_testlib.utils.converting_utils import process_b64_bytearray - - -class RoleDesignation: - def __init__( - self, - shell: Shell, - neo_go_exec_path: str, - block_period: int, - designate_contract: str, - ): - self.neogo = NeoGo(shell, neo_go_exec_path) - self.block_period = block_period - self.designate_contract = designate_contract - - def set_notary_nodes( - self, - addr: str, - pubkeys: list[str], - script_hash: str, - wallet: str, - passwd: str, - endpoint: str, - ) -> str: - keys = [f"bytes:{k}" for k in pubkeys] - keys_str = " ".join(keys) - out = self.neogo.contract.invokefunction( - address=addr, - scripthash=self.designate_contract, - wallet=wallet, - wallet_password=passwd, - rpc_endpoint=endpoint, - arguments=f"designateAsRole int:32 [ {keys_str} ] -- {script_hash}", - force=True, - ) - sleep(self.block_period) - return out.stdout.split(" ")[-1] - - def set_inner_ring( - self, - addr: str, - pubkeys: list[str], - script_hash: str, - wallet: str, - passwd: str, - endpoint: str, - ) -> str: - keys = [f"bytes:{k}" for k in pubkeys] - keys_str = " ".join(keys) - out = self.neogo.contract.invokefunction( - address=addr, - scripthash=self.designate_contract, - wallet=wallet, - wallet_password=passwd, - rpc_endpoint=endpoint, - arguments=f"designateAsRole int:16 [ {keys_str} ] -- {script_hash}", - force=True, - ) - sleep(self.block_period) - return out.stdout.split(" ")[-1] - - def set_oracles( - self, - addr: str, - pubkeys: list[str], - script_hash: str, - wallet: str, - passwd: str, - endpoint: str, - ) -> str: - keys = [f"bytes:{k}" for k in pubkeys] - keys_str = " ".join(keys) - out = self.neogo.contract.invokefunction( - address=addr, - scripthash=self.designate_contract, - wallet=wallet, - wallet_password=passwd, - rpc_endpoint=endpoint, - arguments=f"designateAsRole int:8 [ {keys_str} ] -- {script_hash}", - force=True, - ) - sleep(self.block_period) - return out.stdout.split(" ")[-1] - - def set_notary_nodes_multisig_tx( - self, - pubkeys: list[str], - script_hash: str, - wallets: list[str], - passwords: list[str], - address: str, - endpoint: str, - invoke_tx_file: str, - ) -> None: - keys = [f"bytes:{k}" for k in pubkeys] - keys_str = " ".join(keys) - multisig = Multisig( - self.neogo, invoke_tx_file=invoke_tx_file, block_period=self.block_period - ) - multisig.create_and_send( - self.designate_contract, - f"designateAsRole int:32 [ {keys_str} ]", - script_hash, - wallets, - passwords, - address, - endpoint, - ) - sleep(self.block_period) - - def set_inner_ring_multisig_tx( - self, - pubkeys: list[str], - script_hash: str, - wallets: list[str], - passwords: list[str], - address: str, - endpoint: str, - invoke_tx_file: str, - ) -> None: - keys = [f"bytes:{k}" for k in 
pubkeys] - keys_str = " ".join(keys) - multisig = Multisig( - self.neogo, invoke_tx_file=invoke_tx_file, block_period=self.block_period - ) - multisig.create_and_send( - self.designate_contract, - f"designateAsRole int:16 [ {keys_str} ]", - script_hash, - wallets, - passwords, - address, - endpoint, - ) - sleep(self.block_period) - - def check_candidates(self, contract_hash: str, endpoint: str) -> Optional[list[str]]: - out = self.neogo.contract.testinvokefunction( - scripthash=contract_hash, - method="innerRingCandidates", - rpc_endpoint=endpoint, - ) - output_dict = json.loads(out.stdout.replace("\n", "")) - candidates = output_dict["stack"][0]["value"] - if len(candidates) == 0: - return None - # TODO: return a list of keys - return [process_b64_bytearray(candidate["value"][0]["value"]) for candidate in candidates] diff --git a/src/frostfs_testlib/blockchain/rpc_client.py b/src/frostfs_testlib/blockchain/rpc_client.py deleted file mode 100644 index 25286d0..0000000 --- a/src/frostfs_testlib/blockchain/rpc_client.py +++ /dev/null @@ -1,80 +0,0 @@ -import json -import logging -from typing import Any, Dict, Optional - -import requests - -logger = logging.getLogger("frostfs.testlib.blockchain") - - -class NeoRPCException(Exception): - pass - - -class RPCClient: - def __init__(self, endpoint, timeout: int = 10): - self.endpoint = endpoint - self.timeout = timeout - - def get_raw_transaction(self, tx_hash): - return self._call_endpoint("getrawtransaction", params=[tx_hash]) - - def send_raw_transaction(self, raw_tx: str): - return self._call_endpoint("sendrawtransaction", params=[raw_tx]) - - def get_storage(self, sc_hash: str, storage_key: str): - return self._call_endpoint("getstorage", params=[sc_hash, storage_key]) - - def invoke_function( - self, - sc_hash: str, - function: str, - params: Optional[list] = None, - signers: Optional[list] = None, - ) -> Dict[str, Any]: - return self._call_endpoint( - "invokefunction", params=[sc_hash, function, params or [], signers or []] - ) - - def get_transaction_height(self, txid: str): - return self._call_endpoint("gettransactionheight", params=[txid]) - - def get_nep17_transfers(self, address, timestamps=None): - params = [address] - if timestamps: - params.append(timestamps) - return self._call_endpoint("getnep17transfers", params) - - def get_nep17_balances(self, address): - return self._call_endpoint("getnep17balances", [address, 0]) - - def get_application_log(self, tx_hash): - return self._call_endpoint("getapplicationlog", params=[tx_hash]) - - def get_contract_state(self, contract_id): - """ - `contract_id` might be contract name, script hash or number - """ - return self._call_endpoint("getcontractstate", params=[contract_id]) - - def _call_endpoint(self, method, params=None) -> Dict[str, Any]: - payload = _build_payload(method, params) - logger.info(payload) - try: - response = requests.post(self.endpoint, data=payload, timeout=self.timeout) - response.raise_for_status() - if response.status_code == 200: - if "result" in response.json(): - return response.json()["result"] - return response.json() - except Exception as exc: - raise NeoRPCException( - f"Could not call method {method} " - f"with endpoint: {self.endpoint}: {exc}" - f"\nRequest sent: {payload}" - ) from exc - - -def _build_payload(method, params: Optional[list] = None): - payload = json.dumps({"jsonrpc": "2.0", "method": method, "params": params or [], "id": 1}) - return payload.replace("'", '"') diff --git a/src/frostfs_testlib/cli/__init__.py 
b/src/frostfs_testlib/cli/__init__.py deleted file mode 100644 index 7e3d243..0000000 --- a/src/frostfs_testlib/cli/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from frostfs_testlib.cli.frostfs_adm import FrostfsAdm -from frostfs_testlib.cli.frostfs_authmate import FrostfsAuthmate -from frostfs_testlib.cli.frostfs_cli import FrostfsCli -from frostfs_testlib.cli.generic_cli import GenericCli -from frostfs_testlib.cli.neogo import NeoGo, NetworkType diff --git a/src/frostfs_testlib/cli/cli_command.py b/src/frostfs_testlib/cli/cli_command.py deleted file mode 100644 index 224e9e3..0000000 --- a/src/frostfs_testlib/cli/cli_command.py +++ /dev/null @@ -1,81 +0,0 @@ -from typing import Optional - -from frostfs_testlib.shell import CommandOptions, CommandResult, InteractiveInput, Shell -from frostfs_testlib.utils.datetime_utils import parse_time - - -class CliCommand: - TIMEOUT_INACCURACY = 10 - WALLET_SOURCE_ERROR_MSG = "Provide either wallet or wallet_config to specify wallet location" - WALLET_PASSWD_ERROR_MSG = "Provide either wallet_password or wallet_config to specify password" - - cli_exec_path: Optional[str] = None - __base_params: Optional[str] = None - map_params = { - "json_mode": "json", - "await_mode": "await", - "hash_type": "hash", - "doc_type": "type", - "to_address": "to", - "from_address": "from", - "to_file": "to", - "from_file": "from", - } - - def __init__(self, shell: Shell, cli_exec_path: str, **base_params): - self.shell = shell - self.cli_exec_path = cli_exec_path - self.__base_params = " ".join([f"--{param} {value}" for param, value in base_params.items() if value]) - - def _format_command(self, command: str, **params) -> str: - param_str = [] - for param, value in params.items(): - if param == "post_data": - param_str.append(value) - continue - if param in self.map_params.keys(): - param = self.map_params[param] - param = param.replace("_", "-") - if not value: - continue - if isinstance(value, bool): - param_str.append(f"--{param}") - elif isinstance(value, int): - param_str.append(f"--{param} {value}") - elif isinstance(value, list): - for value_item in value: - val_str = str(value_item).replace("'", "\\'") - param_str.append(f"--{param} '{val_str}'") - elif isinstance(value, dict): - param_str.append(f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\'') - else: - if "'" in str(value): - value_str = str(value).replace('"', '\\"') - param_str.append(f'--{param} "{value_str}"') - else: - param_str.append(f"--{param} '{value}'") - - param_str = " ".join(param_str) - - return f"{self.cli_exec_path} {self.__base_params} {command or ''} {param_str}" - - def _execute(self, command: Optional[str], **params) -> CommandResult: - if timeout := params.get("timeout"): - timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY - - return self.shell.exec( - self._format_command(command, **params), - CommandOptions(timeout=timeout), - ) - - def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult: - if timeout := params.get("timeout"): - timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY - - return self.shell.exec( - self._format_command(command, **params), - CommandOptions( - interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)], - timeout=timeout, - ), - ) diff --git a/src/frostfs_testlib/cli/frostfs_adm/__init__.py b/src/frostfs_testlib/cli/frostfs_adm/__init__.py deleted file mode 100644 index d592eaf..0000000 --- a/src/frostfs_testlib/cli/frostfs_adm/__init__.py +++ /dev/null @@ -1 +0,0 @@ 
-from frostfs_testlib.cli.frostfs_adm.adm import FrostfsAdm diff --git a/src/frostfs_testlib/cli/frostfs_adm/adm.py b/src/frostfs_testlib/cli/frostfs_adm/adm.py deleted file mode 100644 index 0b56fbd..0000000 --- a/src/frostfs_testlib/cli/frostfs_adm/adm.py +++ /dev/null @@ -1,25 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.frostfs_adm.config import FrostfsAdmConfig -from frostfs_testlib.cli.frostfs_adm.morph import FrostfsAdmMorph -from frostfs_testlib.cli.frostfs_adm.storage_config import FrostfsAdmStorageConfig -from frostfs_testlib.cli.frostfs_adm.subnet import FrostfsAdmMorphSubnet -from frostfs_testlib.cli.frostfs_adm.version import FrostfsAdmVersion -from frostfs_testlib.shell import Shell - - -class FrostfsAdm: - morph: FrostfsAdmMorph - subnet: FrostfsAdmMorphSubnet - storage_config: FrostfsAdmStorageConfig - version: FrostfsAdmVersion - config: FrostfsAdmConfig - - def __init__(self, shell: Shell, frostfs_adm_exec_path: str, config_file: Optional[str] = None): - self.config = FrostfsAdmConfig(shell, frostfs_adm_exec_path, config=config_file) - self.morph = FrostfsAdmMorph(shell, frostfs_adm_exec_path, config=config_file) - self.subnet = FrostfsAdmMorphSubnet(shell, frostfs_adm_exec_path, config=config_file) - self.storage_config = FrostfsAdmStorageConfig( - shell, frostfs_adm_exec_path, config=config_file - ) - self.version = FrostfsAdmVersion(shell, frostfs_adm_exec_path, config=config_file) diff --git a/src/frostfs_testlib/cli/frostfs_adm/config.py b/src/frostfs_testlib/cli/frostfs_adm/config.py deleted file mode 100644 index a29b3ac..0000000 --- a/src/frostfs_testlib/cli/frostfs_adm/config.py +++ /dev/null @@ -1,22 +0,0 @@ -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsAdmConfig(CliCommand): - def init(self, path: str = "~/.frostfs/adm/config.yml") -> CommandResult: - """Initialize basic frostfs-adm configuration file. - - Args: - path: Path to config (default ~/.frostfs/adm/config.yml). - - Returns: - Command's result. - """ - return self._execute( - "config init", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py deleted file mode 100644 index bdf4a91..0000000 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ /dev/null @@ -1,488 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsAdmMorph(CliCommand): - def deposit_notary( - self, - rpc_endpoint: str, - account: str, - gas: str, - storage_wallet: Optional[str] = None, - till: Optional[str] = None, - ) -> CommandResult: - """Deposit GAS for notary service. - - Args: - account: Wallet account address. - gas: Amount of GAS to deposit. - rpc_endpoint: N3 RPC node endpoint. - storage_wallet: Path to storage node wallet. - till: Notary deposit duration in blocks. - - Returns: - Command's result. - """ - return self._execute( - "morph deposit-notary", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, - ) - - def dump_balances( - self, - rpc_endpoint: str, - alphabet: Optional[str] = None, - proxy: Optional[str] = None, - script_hash: Optional[str] = None, - storage: Optional[str] = None, - ) -> CommandResult: - """Dump GAS balances. - - Args: - alphabet: Dump balances of alphabet contracts. 
- proxy: Dump balances of the proxy contract. - rpc_endpoint: N3 RPC node endpoint. - script_hash: Use script-hash format for addresses. - storage: Dump balances of storage nodes from the current netmap. - - Returns: - Command's result. - """ - return self._execute( - "morph dump-balances", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, - ) - - def dump_config(self, rpc_endpoint: str) -> CommandResult: - """Section for morph network configuration commands. - - Args: - rpc_endpoint: N3 RPC node endpoint - - Returns: - Command's result. - """ - return self._execute( - "morph dump-config", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, - ) - - def set_config(self, set_key_value: str, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None) -> CommandResult: - """Add/update global config value in the FrostFS network. - - Args: - set_key_value: key1=val1 [key2=val2 ...] - alphabet_wallets: Path to alphabet wallets dir - rpc_endpoint: N3 RPC node endpoint - - Returns: - Command's result. - """ - return self._execute( - f"morph set-config {set_key_value}", - **{param: param_value for param, param_value in locals().items() if param not in ["self", "set_key_value"]}, - ) - - def dump_containers( - self, - rpc_endpoint: str, - cid: Optional[str] = None, - container_contract: Optional[str] = None, - dump: str = "./testlib_dump_container", - ) -> CommandResult: - """Dump FrostFS containers to file. - - Args: - cid: Containers to dump. - container_contract: Container contract hash (for networks without NNS). - dump: File where to save dumped containers (default: ./testlib_dump_container). - rpc_endpoint: N3 RPC node endpoint. - - Returns: - Command's result. - """ - return self._execute( - "morph dump-containers", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, - ) - - def dump_hashes(self, rpc_endpoint: str, domain: Optional[str] = None) -> CommandResult: - """Dump deployed contract hashes. - - Args: - rpc_endpoint: N3 RPC node endpoint. - - Returns: - Command's result. - """ - return self._execute( - "morph dump-hashes", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, - ) - - def force_new_epoch( - self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None, delta: Optional[int] = None - ) -> CommandResult: - """Create new FrostFS epoch event in the side chain. - - Args: - alphabet_wallets: Path to alphabet wallets dir. - rpc_endpoint: N3 RPC node endpoint. - - Returns: - Command's result. - """ - return self._execute( - "morph force-new-epoch", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, - ) - - def generate_alphabet( - self, - rpc_endpoint: str, - alphabet_wallets: str, - size: int = 7, - ) -> CommandResult: - """Generate alphabet wallets for consensus nodes of the morph network. - - Args: - alphabet_wallets: Path to alphabet wallets dir. - size: Amount of alphabet wallets to generate (default 7). - rpc_endpoint: N3 RPC node endpoint. - - Returns: - Command's result. 
- """ - return self._execute( - "morph generate-alphabet", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, - ) - - def generate_storage_wallet( - self, - rpc_endpoint: str, - alphabet_wallets: str, - storage_wallet: str, - initial_gas: Optional[str] = None, - ) -> CommandResult: - """Generate storage node wallet for the morph network. - - Args: - alphabet_wallets: Path to alphabet wallets dir. - initial_gas: Initial amount of GAS to transfer. - rpc_endpoint: N3 RPC node endpoint. - storage_wallet: Path to new storage node wallet. - - Returns: - Command's result. - """ - return self._execute( - "morph generate-storage-wallet", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, - ) - - def init( - self, - rpc_endpoint: str, - alphabet_wallets: str, - contracts: str, - protocol: str, - container_alias_fee: int = 500, - container_fee: int = 1000, - epoch_duration: int = 240, - homomorphic_disabled: bool = False, - local_dump: Optional[str] = None, - max_object_size: int = 67108864, - ) -> CommandResult: - """Section for morph network configuration commands. - - Args: - alphabet_wallets: Path to alphabet wallets dir. - container_alias_fee: Container alias fee (default 500). - container_fee: Container registration fee (default 1000). - contracts: Path to archive with compiled FrostFS contracts - (default fetched from latest git release). - epoch_duration: Amount of side chain blocks in one FrostFS epoch (default 240). - homomorphic_disabled: Disable object homomorphic hashing. - local_dump: Path to the blocks dump file. - max_object_size: Max single object size in bytes (default 67108864). - protocol: Path to the consensus node configuration. - rpc_endpoint: N3 RPC node endpoint. - - Returns: - Command's result. - """ - return self._execute( - "morph init", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, - ) - - def refill_gas( - self, - rpc_endpoint: str, - alphabet_wallets: str, - storage_wallet: str, - gas: Optional[str] = None, - ) -> CommandResult: - """Refill GAS of storage node's wallet in the morph network - - Args: - alphabet_wallets: Path to alphabet wallets dir. - gas: Additional amount of GAS to transfer. - rpc_endpoint: N3 RPC node endpoint. - storage_wallet: Path to new storage node wallet. - - Returns: - Command's result. - """ - return self._execute( - "morph refill-gas", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, - ) - - def restore_containers( - self, - rpc_endpoint: str, - alphabet_wallets: str, - cid: str, - dump: str, - ) -> CommandResult: - """Restore FrostFS containers from file. - - Args: - alphabet_wallets: Path to alphabet wallets dir. - cid: Containers to restore. - dump: File to restore containers from. - rpc_endpoint: N3 RPC node endpoint. - - Returns: - Command's result. - """ - return self._execute( - "morph restore-containers", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, - ) - - def set_policy( - self, - rpc_endpoint: str, - alphabet_wallets: str, - exec_fee_factor: Optional[int] = None, - storage_price: Optional[int] = None, - fee_per_byte: Optional[int] = None, - ) -> CommandResult: - """Set global policy values. - - Args: - alphabet_wallets: Path to alphabet wallets dir. - exec_fee_factor: ExecFeeFactor=. - storage_price: StoragePrice=. - fee_per_byte: FeePerByte=. - rpc_endpoint: N3 RPC node endpoint. 
- - Returns: - Command's result. - """ - non_param_attribute = "" - if exec_fee_factor: - non_param_attribute += f"ExecFeeFactor={exec_fee_factor} " - if storage_price: - non_param_attribute += f"StoragePrice={storage_price} " - if fee_per_byte: - non_param_attribute += f"FeePerByte={fee_per_byte} " - return self._execute( - f"morph restore-containers {non_param_attribute}", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self", "exec_fee_factor", "storage_price", "fee_per_byte"] - }, - ) - - def update_contracts( - self, - rpc_endpoint: str, - alphabet_wallets: str, - contracts: Optional[str] = None, - ) -> CommandResult: - """Update FrostFS contracts. - - Args: - alphabet_wallets: Path to alphabet wallets dir. - contracts: Path to archive with compiled FrostFS contracts - (default fetched from latest git release). - rpc_endpoint: N3 RPC node endpoint. - - Returns: - Command's result. - """ - return self._execute( - "morph update-contracts", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, - ) - - def remove_nodes( - self, node_netmap_keys: list[str], rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None - ) -> CommandResult: - """Move node to the Offline state in the candidates list - and tick an epoch to update the netmap using frostfs-adm - - Args: - node_netmap_keys: list of nodes netmap keys. - alphabet_wallets: Path to alphabet wallets dir. - rpc_endpoint: N3 RPC node endpoint. - - Returns: - Command's result. - """ - if not len(node_netmap_keys): - raise AttributeError("Got empty node_netmap_keys list") - - return self._execute( - f"morph remove-nodes {' '.join(node_netmap_keys)}", - **{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]}, - ) - - def add_rule( - self, - chain_id: str, - target_name: str, - target_type: str, - rule: Optional[list[str]] = None, - path: Optional[str] = None, - chain_id_hex: Optional[bool] = None, - chain_name: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Drop objects from the node's local storage - - Args: - chain-id: Assign ID to the parsed chain - chain-id-hex: Flag to parse chain ID as hex - path: Path to encoded chain in JSON or binary format - rule: Rule statement - target-name: Resource name in APE resource name format - target-type: Resource type(container/namespace) - timeout: Timeout for an operation (default 15s) - wallet: Path to the wallet or binary key - - Returns: - Command`s result. - """ - return self._execute( - "morph ape add-rule-chain", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def get_rule( - self, - chain_id: str, - target_name: str, - target_type: str, - chain_id_hex: Optional[bool] = None, - chain_name: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Drop objects from the node's local storage - - Args: - chain-id string Chain id - chain-id-hex Flag to parse chain ID as hex - target-name string Resource name in APE resource name format - target-type string Resource type(container/namespace) - timeout duration Timeout for an operation (default 15s) - wallet string Path to the wallet or binary key - - Returns: - Command`s result. 
- """ - return self._execute( - "morph ape get-rule-chain", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def list_rules( - self, - target_type: str, - target_name: Optional[str] = None, - rpc_endpoint: Optional[str] = None, - chain_name: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Drop objects from the node's local storage - - Args: - target-name: Resource name in APE resource name format - target-type: Resource type(container/namespace) - timeout: Timeout for an operation (default 15s) - wallet: Path to the wallet or binary key - - Returns: - Command`s result. - """ - return self._execute( - "morph ape list-rule-chains", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def remove_rule( - self, - chain_id: str, - target_name: str, - target_type: str, - all: Optional[bool] = None, - chain_name: Optional[str] = None, - chain_id_hex: Optional[bool] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Drop objects from the node's local storage - - Args: - all: Remove all chains - chain-id: Assign ID to the parsed chain - chain-id-hex: Flag to parse chain ID as hex - target-name: Resource name in APE resource name format - target-type: Resource type(container/namespace) - timeout: Timeout for an operation (default 15s) - wallet: Path to the wallet or binary key - - Returns: - Command`s result. - """ - return self._execute( - "morph ape rm-rule-chain", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def get_nns_records( - self, - name: str, - type: Optional[str] = None, - rpc_endpoint: Optional[str] = None, - alphabet_wallets: Optional[str] = None, - ) -> CommandResult: - """Returns domain record of the specified type - - Args: - name: Domain name - type: Domain name service record type(A|CNAME|SOA|TXT) - rpc_endpoint: N3 RPC node endpoint - alphabet_wallets: path to alphabet wallets dir - - Returns: - Command's result - """ - return self._execute( - "morph nns get-records", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) diff --git a/src/frostfs_testlib/cli/frostfs_adm/storage_config.py b/src/frostfs_testlib/cli/frostfs_adm/storage_config.py deleted file mode 100644 index 81bf210..0000000 --- a/src/frostfs_testlib/cli/frostfs_adm/storage_config.py +++ /dev/null @@ -1,23 +0,0 @@ -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsAdmStorageConfig(CliCommand): - def set(self, account: str, wallet: str) -> CommandResult: - """Initialize basic frostfs-adm configuration file. - - Args: - account: Wallet account. - wallet: Path to wallet. - - Returns: - Command's result. 
- """ - return self._execute( - "storage-config", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) diff --git a/src/frostfs_testlib/cli/frostfs_adm/subnet.py b/src/frostfs_testlib/cli/frostfs_adm/subnet.py deleted file mode 100644 index fb5935e..0000000 --- a/src/frostfs_testlib/cli/frostfs_adm/subnet.py +++ /dev/null @@ -1,239 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsAdmMorphSubnet(CliCommand): - def create( - self, rpc_endpoint: str, address: str, wallet: str, notary: bool = False - ) -> CommandResult: - """Create FrostFS subnet. - - Args: - address: Address in the wallet, optional. - notary: Flag to create subnet in notary environment. - rpc_endpoint: N3 RPC node endpoint. - wallet: Path to file with wallet. - - Returns: - Command's result. - """ - return self._execute( - "morph subnet create", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def get(self, rpc_endpoint: str, subnet: str) -> CommandResult: - """Read information about the FrostFS subnet. - - Args: - rpc_endpoint: N3 RPC node endpoint. - subnet: ID of the subnet to read. - - Returns: - Command's result. - """ - return self._execute( - "morph subnet get", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def remove( - self, rpc_endpoint: str, wallet: str, subnet: str, address: Optional[str] = None - ) -> CommandResult: - """Remove FrostFS subnet. - - Args: - address: Address in the wallet, optional. - rpc_endpoint: N3 RPC node endpoint. - subnet: ID of the subnet to read. - wallet: Path to file with wallet. - - Returns: - Command's result. - """ - return self._execute( - "morph subnet remove", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def admin_add( - self, - rpc_endpoint: str, - wallet: str, - admin: str, - subnet: str, - client: Optional[str] = None, - group: Optional[str] = None, - address: Optional[str] = None, - ) -> CommandResult: - """Add admin to the FrostFS subnet. - - Args: - address: Address in the wallet, optional. - admin: Hex-encoded public key of the admin. - client: Add client admin instead of node one. - group: Client group ID in text format (needed with --client only). - rpc_endpoint: N3 RPC node endpoint. - subnet: ID of the subnet to read. - wallet: Path to file with wallet. - - Returns: - Command's result. - """ - return self._execute( - "morph subnet admin add", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def admin_remove( - self, - rpc_endpoint: str, - wallet: str, - admin: str, - subnet: str, - client: Optional[str] = None, - address: Optional[str] = None, - ) -> CommandResult: - """Remove admin of the FrostFS subnet. - - Args: - address: Address in the wallet, optional. - admin: Hex-encoded public key of the admin. - client: Remove client admin instead of node one. - rpc_endpoint: N3 RPC node endpoint. - subnet: ID of the subnet to read. - wallet: Path to file with wallet. - - Returns: - Command's result. 
- """ - return self._execute( - "morph subnet admin remove", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def client_add( - self, - rpc_endpoint: str, - wallet: str, - subnet: str, - client: Optional[str] = None, - group: Optional[str] = None, - address: Optional[str] = None, - ) -> CommandResult: - """Add client to the FrostFS subnet. - - Args: - address: Address in the wallet, optional. - client: Add client admin instead of node one. - group: Client group ID in text format (needed with --client only). - rpc_endpoint: N3 RPC node endpoint. - subnet: ID of the subnet to read. - wallet: Path to file with wallet. - - Returns: - Command's result. - """ - return self._execute( - "morph subnet client add", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def client_remove( - self, - rpc_endpoint: str, - wallet: str, - client: str, - group: str, - subnet: str, - address: Optional[str] = None, - ) -> CommandResult: - """Remove client of the FrostFS subnet. - - Args: - address: Address in the wallet, optional. - client: Remove client admin instead of node one. - group: ID of the client group to work with. - rpc_endpoint: N3 RPC node endpoint. - subnet: ID of the subnet to read. - wallet: Path to file with wallet. - - Returns: - Command's result. - """ - return self._execute( - "morph subnet client remove", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def node_add(self, rpc_endpoint: str, wallet: str, node: str, subnet: str) -> CommandResult: - """Add node to the FrostFS subnet. - - Args: - node: Hex-encoded public key of the node. - rpc_endpoint: N3 RPC node endpoint. - subnet: ID of the subnet to read. - wallet: Path to file with wallet. - - Returns: - Command's result. - """ - return self._execute( - "morph subnet node add", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def node_remove(self, rpc_endpoint: str, wallet: str, node: str, subnet: str) -> CommandResult: - """Remove node from the FrostFS subnet. - - Args: - node: Hex-encoded public key of the node. - rpc_endpoint: N3 RPC node endpoint. - subnet: ID of the subnet to read. - wallet: Path to file with wallet. - - Returns: - Command's result. - """ - return self._execute( - "morph subnet node remove", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) diff --git a/src/frostfs_testlib/cli/frostfs_adm/version.py b/src/frostfs_testlib/cli/frostfs_adm/version.py deleted file mode 100644 index 7d09afc..0000000 --- a/src/frostfs_testlib/cli/frostfs_adm/version.py +++ /dev/null @@ -1,12 +0,0 @@ -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsAdmVersion(CliCommand): - def get(self) -> CommandResult: - """Application version - - Returns: - Command's result. 
- """ - return self._execute("", version=True) diff --git a/src/frostfs_testlib/cli/frostfs_authmate/__init__.py b/src/frostfs_testlib/cli/frostfs_authmate/__init__.py deleted file mode 100644 index 6f2d765..0000000 --- a/src/frostfs_testlib/cli/frostfs_authmate/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from frostfs_testlib.cli.frostfs_authmate.authmate import FrostfsAuthmate diff --git a/src/frostfs_testlib/cli/frostfs_authmate/authmate.py b/src/frostfs_testlib/cli/frostfs_authmate/authmate.py deleted file mode 100644 index 7912dae..0000000 --- a/src/frostfs_testlib/cli/frostfs_authmate/authmate.py +++ /dev/null @@ -1,14 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.frostfs_authmate.secret import FrostfsAuthmateSecret -from frostfs_testlib.cli.frostfs_authmate.version import FrostfsAuthmateVersion -from frostfs_testlib.shell import Shell - - -class FrostfsAuthmate: - secret: FrostfsAuthmateSecret - version: FrostfsAuthmateVersion - - def __init__(self, shell: Shell, frostfs_authmate_exec_path: str): - self.secret = FrostfsAuthmateSecret(shell, frostfs_authmate_exec_path) - self.version = FrostfsAuthmateVersion(shell, frostfs_authmate_exec_path) diff --git a/src/frostfs_testlib/cli/frostfs_authmate/secret.py b/src/frostfs_testlib/cli/frostfs_authmate/secret.py deleted file mode 100644 index 5f300bc..0000000 --- a/src/frostfs_testlib/cli/frostfs_authmate/secret.py +++ /dev/null @@ -1,90 +0,0 @@ -from typing import Optional, Union - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsAuthmateSecret(CliCommand): - def obtain( - self, - wallet: str, - wallet_password: str, - peer: str, - gate_wallet: str, - access_key_id: str, - address: Optional[str] = None, - gate_address: Optional[str] = None, - ) -> CommandResult: - """Obtain a secret from FrostFS network. - - Args: - wallet: Path to the wallet. - wallet_password: Wallet password. - address: Address of wallet account. - peer: Address of frostfs peer to connect to. - gate_wallet: Path to the wallet. - gate_address: Address of wallet account. - access_key_id: Access key id for s3. - - Returns: - Command's result. - """ - return self._execute_with_password( - "obtain-secret", - wallet_password, - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def issue( - self, - wallet: str, - wallet_password: str, - peer: str, - gate_public_key: Union[str, list[str]], - address: Optional[str] = None, - container_id: Optional[str] = None, - container_friendly_name: Optional[str] = None, - container_placement_policy: Optional[str] = None, - session_tokens: Optional[str] = None, - lifetime: Optional[str] = None, - container_policy: Optional[str] = None, - aws_cli_credentials: Optional[str] = None, - ) -> CommandResult: - """Obtain a secret from FrostFS network - - Args: - wallet: Path to the wallet. - wallet_password: Wallet password. - address: Address of wallet account. - peer: Address of a frostfs peer to connect to. - bearer_rules: Rules for bearer token as plain json string. - gate_public_key: Public 256r1 key of a gate (send list[str] of keys to use multiple gates). - container_id: Auth container id to put the secret into. - container_friendly_name: Friendly name of auth container to put the secret into. - container_placement_policy: Placement policy of auth container to put the secret into - (default: "REP 2 IN X CBF 3 SELECT 2 FROM * AS X"). 
- session_tokens: Create session tokens with rules, if the rules are set as 'none', no - session tokens will be created. - lifetime: Lifetime of tokens. For example 50h30m (note: max time unit is an hour so to - set a day you should use 24h). It will be ceil rounded to the nearest amount of - epoch. (default: 720h0m0s). - container_policy: Mapping AWS storage class to FrostFS storage policy as plain json string - or path to json file. - aws_cli_credentials: Path to the aws cli credential file. - - Returns: - Command's result. - """ - return self._execute_with_password( - "issue-secret", - wallet_password, - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) diff --git a/src/frostfs_testlib/cli/frostfs_authmate/version.py b/src/frostfs_testlib/cli/frostfs_authmate/version.py deleted file mode 100644 index d459a06..0000000 --- a/src/frostfs_testlib/cli/frostfs_authmate/version.py +++ /dev/null @@ -1,12 +0,0 @@ -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsAuthmateVersion(CliCommand): - def get(self) -> CommandResult: - """Application version - - Returns: - Command's result. - """ - return self._execute("", version=True) diff --git a/src/frostfs_testlib/cli/frostfs_cli/__init__.py b/src/frostfs_testlib/cli/frostfs_cli/__init__.py deleted file mode 100644 index e67f887..0000000 --- a/src/frostfs_testlib/cli/frostfs_cli/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli diff --git a/src/frostfs_testlib/cli/frostfs_cli/accounting.py b/src/frostfs_testlib/cli/frostfs_cli/accounting.py deleted file mode 100644 index 7b2b9f0..0000000 --- a/src/frostfs_testlib/cli/frostfs_cli/accounting.py +++ /dev/null @@ -1,30 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsCliAccounting(CliCommand): - def balance( - self, - wallet: Optional[str] = None, - rpc_endpoint: Optional[str] = None, - address: Optional[str] = None, - owner: Optional[str] = None, - ) -> CommandResult: - """Get internal balance of FrostFS account - - Args: - address: Address of wallet account. - owner: Owner of balance account (omit to use owner from private key). - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - - Returns: - Command's result. - - """ - return self._execute( - "accounting balance", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/acl.py b/src/frostfs_testlib/cli/frostfs_cli/acl.py deleted file mode 100644 index 3e60582..0000000 --- a/src/frostfs_testlib/cli/frostfs_cli/acl.py +++ /dev/null @@ -1,52 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsCliACL(CliCommand): - def extended_create( - self, cid: str, out: str, file: Optional[str] = None, rule: Optional[list] = None - ) -> CommandResult: - - """Create extended ACL from the text representation. - - Rule consist of these blocks: [ ...] [ ...] - Action is 'allow' or 'deny'. - Operation is an object service verb: 'get', 'head', 'put', 'search', 'delete', 'getrange', - or 'getrangehash'. - - Filter consists of : - Typ is 'obj' for object applied filter or 'req' for request applied filter. 
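# A minimal sketch of issuing S3 credentials with the authmate wrapper shown
# above. The binary name, endpoint, wallet path and `LocalShell` are
# illustrative assumptions; only the method signatures come from the wrapper.
from frostfs_testlib.cli.frostfs_authmate import FrostfsAuthmate
from frostfs_testlib.shell import LocalShell

authmate = FrostfsAuthmate(LocalShell(), frostfs_authmate_exec_path="frostfs-s3-authmate")
result = authmate.secret.issue(
    wallet="/path/to/s3-gate-wallet.json",
    wallet_password="",
    peer="storage01.example.com:8080",
    gate_public_key=["02..."],  # one or more hex-encoded 256r1 gate keys
)
print(result.stdout)  # the issued credentials are reported on stdout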
- Key is a valid unicode string corresponding to object or request header key. - Well-known system object headers start with '$Object:' prefix. - User defined headers start without prefix. - Read more about filter keys at: - https://git.frostfs.info/TrueCloudLab/frostfs-api/src/branch/master/proto-docs/acl.md#message-eaclrecord-filter - Match is '=' for matching and '!=' for non-matching filter. - Value is a valid unicode string corresponding to object or request header value. - - Target is - 'user' for container owner, - 'system' for Storage nodes in container and Inner Ring nodes, - 'others' for all other request senders, - 'pubkey:<key1>,<key2>,...' for exact request sender, where <key> is a hex-encoded 33-byte - public key. - - When both '--rule' and '--file' arguments are used, '--rule' records will be placed higher - in resulting extended ACL table. - - Args: - cid: Container ID. - file: Read list of extended ACL table records from text file. - out: Save JSON formatted extended ACL table in file. - rule: Extended ACL table record to apply. - - Returns: - Command's result. - - """ - return self._execute( - "acl extended create", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/ape_manager.py b/src/frostfs_testlib/cli/frostfs_cli/ape_manager.py deleted file mode 100644 index 525a9be..0000000 --- a/src/frostfs_testlib/cli/frostfs_cli/ape_manager.py +++ /dev/null @@ -1,70 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsCliApeManager(CliCommand): - """Operations with APE manager.""" - - def add( - self, - rpc_endpoint: str, - chain_id: Optional[str] = None, - chain_id_hex: Optional[str] = None, - path: Optional[str] = None, - rule: Optional[str] | Optional[list[str]] = None, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Add rule chain for a target.""" - - return self._execute( - "ape-manager add", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def list( - self, - rpc_endpoint: str, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """List rule chains defined on the target. - """ - - return self._execute( - "ape-manager list", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def remove( - self, - rpc_endpoint: str, - chain_id: Optional[str] = None, - chain_id_hex: Optional[str] = None, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Remove rule chain for a target.
- """ - - return self._execute( - "ape-manager remove", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/bearer.py b/src/frostfs_testlib/cli/frostfs_cli/bearer.py deleted file mode 100644 index e21a6c8..0000000 --- a/src/frostfs_testlib/cli/frostfs_cli/bearer.py +++ /dev/null @@ -1,54 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsCliBearer(CliCommand): - def create( - self, - rpc_endpoint: str, - out: str, - issued_at: Optional[str] = None, - expire_at: Optional[str] = None, - not_valid_before: Optional[str] = None, - ape: Optional[str] = None, - eacl: Optional[str] = None, - owner: Optional[str] = None, - json: Optional[bool] = False, - impersonate: Optional[bool] = False, - wallet: Optional[str] = None, - address: Optional[str] = None, - ) -> CommandResult: - """Create bearer token. - - All epoch flags can be specified relative to the current epoch with the +n syntax. - In this case --rpc-endpoint flag should be specified and the epoch in bearer token - is set to current epoch + n. - """ - return self._execute( - "bearer create", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def generate_ape_override( - self, - chain_id: Optional[str] = None, - chain_id_hex: Optional[str] = None, - cid: Optional[str] = None, - output: Optional[str] = None, - path: Optional[str] = None, - rule: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - ) -> CommandResult: - """Generate APE override by target and APE chains. Util command. - - Generated APE override can be dumped to a file in JSON format that is passed to - "create" command. 
- """ - - return self._execute( - "bearer generate-ape-override", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/cli.py b/src/frostfs_testlib/cli/frostfs_cli/cli.py deleted file mode 100644 index 7874f18..0000000 --- a/src/frostfs_testlib/cli/frostfs_cli/cli.py +++ /dev/null @@ -1,48 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.frostfs_cli.accounting import FrostfsCliAccounting -from frostfs_testlib.cli.frostfs_cli.acl import FrostfsCliACL -from frostfs_testlib.cli.frostfs_cli.ape_manager import FrostfsCliApeManager -from frostfs_testlib.cli.frostfs_cli.bearer import FrostfsCliBearer -from frostfs_testlib.cli.frostfs_cli.container import FrostfsCliContainer -from frostfs_testlib.cli.frostfs_cli.control import FrostfsCliControl -from frostfs_testlib.cli.frostfs_cli.netmap import FrostfsCliNetmap -from frostfs_testlib.cli.frostfs_cli.object import FrostfsCliObject -from frostfs_testlib.cli.frostfs_cli.session import FrostfsCliSession -from frostfs_testlib.cli.frostfs_cli.shards import FrostfsCliShards -from frostfs_testlib.cli.frostfs_cli.storagegroup import FrostfsCliStorageGroup -from frostfs_testlib.cli.frostfs_cli.tree import FrostfsCliTree -from frostfs_testlib.cli.frostfs_cli.util import FrostfsCliUtil -from frostfs_testlib.cli.frostfs_cli.version import FrostfsCliVersion -from frostfs_testlib.shell import Shell - - -class FrostfsCli: - accounting: FrostfsCliAccounting - acl: FrostfsCliACL - container: FrostfsCliContainer - netmap: FrostfsCliNetmap - object: FrostfsCliObject - session: FrostfsCliSession - shards: FrostfsCliShards - storagegroup: FrostfsCliStorageGroup - util: FrostfsCliUtil - version: FrostfsCliVersion - control: FrostfsCliControl - ape_manager: FrostfsCliApeManager - - def __init__(self, shell: Shell, frostfs_cli_exec_path: str, config_file: Optional[str] = None): - self.accounting = FrostfsCliAccounting(shell, frostfs_cli_exec_path, config=config_file) - self.acl = FrostfsCliACL(shell, frostfs_cli_exec_path, config=config_file) - self.container = FrostfsCliContainer(shell, frostfs_cli_exec_path, config=config_file) - self.netmap = FrostfsCliNetmap(shell, frostfs_cli_exec_path, config=config_file) - self.object = FrostfsCliObject(shell, frostfs_cli_exec_path, config=config_file) - self.session = FrostfsCliSession(shell, frostfs_cli_exec_path, config=config_file) - self.shards = FrostfsCliShards(shell, frostfs_cli_exec_path, config=config_file) - self.storagegroup = FrostfsCliStorageGroup(shell, frostfs_cli_exec_path, config=config_file) - self.util = FrostfsCliUtil(shell, frostfs_cli_exec_path, config=config_file) - self.version = FrostfsCliVersion(shell, frostfs_cli_exec_path, config=config_file) - self.tree = FrostfsCliTree(shell, frostfs_cli_exec_path, config=config_file) - self.control = FrostfsCliControl(shell, frostfs_cli_exec_path, config=config_file) - self.bearer = FrostfsCliBearer(shell, frostfs_cli_exec_path, config=config_file) - self.ape_manager = FrostfsCliApeManager(shell, frostfs_cli_exec_path, config=config_file) diff --git a/src/frostfs_testlib/cli/frostfs_cli/container.py b/src/frostfs_testlib/cli/frostfs_cli/container.py deleted file mode 100644 index 8bcbe9e..0000000 --- a/src/frostfs_testlib/cli/frostfs_cli/container.py +++ /dev/null @@ -1,332 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsCliContainer(CliCommand): - def 
create( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - nns_zone: Optional[str] = None, - nns_name: Optional[str] = None, - address: Optional[str] = None, - attributes: Optional[dict] = None, - basic_acl: Optional[str] = None, - await_mode: bool = False, - disable_timestamp: bool = False, - force: bool = False, - trace: bool = False, - name: Optional[str] = None, - nonce: Optional[str] = None, - policy: Optional[str] = None, - session: Optional[str] = None, - subnet: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Create a new container and register it in the FrostFS. - It will be stored in the sidechain when the Inner Ring accepts it. - - Args: - address: Address of wallet account. - attributes: Comma separated pairs of container attributes in form of - Key1=Value1,Key2=Value2. - await_mode: Block execution until container is persisted. - basic_acl: Hex encoded basic ACL value or keywords like 'public-read-write', - 'private', 'eacl-public-read' (default "private"). - disable_timestamp: Disable timestamp container attribute. - force: Skip placement validity check. - trace: Generate trace ID and print it. - name: Container name attribute. - nonce: UUIDv4 nonce value for container. - policy: QL-encoded or JSON-encoded placement policy or path to file with it. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - session: Path to a JSON-encoded container session token. - subnet: String representation of container subnetwork. - ttl: TTL value in request meta header (default 2). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for the operation (default 15s). - nns_zone: Container nns zone attribute. - nns_name: Container nns name attribute. - - Returns: - Command's result. - """ - return self._execute( - "container create", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def delete( - self, - rpc_endpoint: str, - cid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - await_mode: bool = False, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - force: bool = False, - trace: bool = False, - ) -> CommandResult: - """ - Delete an existing container. - Only the owner of the container has permission to remove the container. - - Args: - address: Address of wallet account. - await_mode: Block execution until container is removed. - cid: Container ID. - trace: Generate trace ID and print it. - force: Do not check whether container contains locks and remove immediately. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - session: Path to a JSON-encoded container session token. - ttl: TTL value in request meta header (default 2). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - - Returns: - Command's result. 
- """ - - return self._execute( - "container delete", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def get( - self, - rpc_endpoint: str, - cid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - await_mode: bool = False, - to: Optional[str] = None, - json_mode: bool = False, - trace: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Get container field info. - - Args: - address: Address of wallet account. - await_mode: Block execution until container is removed. - cid: Container ID. - json_mode: Print or dump container in JSON format. - trace: Generate trace ID and print it. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - to: Path to dump encoded container. - ttl: TTL value in request meta header (default 2). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for the operation (default 15s). - generate_key: Generate a new private key. - - Returns: - Command's result. - """ - return self._execute( - "container get", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def get_eacl( - self, - rpc_endpoint: str, - cid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - await_mode: bool = False, - to: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Get extended ACL table of container. - - Args: - address: Address of wallet account. - await_mode: Block execution until container is removed. - cid: Container ID. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - to: Path to dump encoded container. - json_mode: Print or dump container in JSON format. - trace: Generate trace ID and print it. - session: Path to a JSON-encoded container session token. - ttl: TTL value in request meta header (default 2). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for the operation (default 15s). - generate_key: Generate a new private key. - - Returns: - Command's result. - - """ - return self._execute( - "container get-eacl", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def list( - self, - rpc_endpoint: str, - name: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - owner: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - **params, - ) -> CommandResult: - """ - List all created containers. - - Args: - address: Address of wallet account. - name: List containers by the attribute name. - owner: Owner of containers (omit to use owner from private key). - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - ttl: TTL value in request meta header (default 2). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - trace: Generate trace ID and print it. - timeout: Timeout for the operation (default 15s). - generate_key: Generate a new private key. - - Returns: - Command's result. 
- """ - return self._execute( - "container list", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def list_objects( - self, - rpc_endpoint: str, - cid: str, - bearer: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - List existing objects in container. - - Args: - address: Address of wallet account. - cid: Container ID. - bearer: File with signed JSON or binary encoded bearer token. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - ttl: TTL value in request meta header (default 2). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - trace: Generate trace ID and print it. - timeout: Timeout for the operation (default 15s). - generate_key: Generate a new private key. - - Returns: - Command's result. - """ - return self._execute( - "container list-objects", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - # TODO Deprecated method with 0.42 - def set_eacl( - self, - rpc_endpoint: str, - cid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - await_mode: bool = False, - table: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Set a new extended ACL table for the container. - Container ID in the EACL table will be substituted with the ID from the CLI. - - Args: - address: Address of wallet account. - await_mode: Block execution until container is removed. - cid: Container ID. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - session: Path to a JSON-encoded container session token. - table: Path to file with JSON or binary encoded EACL table. - ttl: TTL value in request meta header (default 2). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for the operation (default 15s). - - Returns: - Command's result. - """ - return self._execute( - "container set-eacl", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def search_node( - self, - rpc_endpoint: str, - cid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - ttl: Optional[int] = None, - from_file: Optional[str] = None, - trace: bool = False, - short: Optional[bool] = True, - xhdr: Optional[dict] = None, - generate_key: Optional[bool] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Show the nodes participating in the container in the current epoch. - - Args: - rpc_endpoint: string Remote host address (as 'multiaddr' or ':') - wallet: WIF (NEP-2) string or path to the wallet or binary key. - cid: Container ID. - address: Address of wallet account. - ttl: TTL value in request meta header (default 2). - from_file: string File path with encoded container - timeout: duration Timeout for the operation (default 15 s) - short: shorten the output of node information. - trace: Generate trace ID and print it. - xhdr: Dict with request X-Headers. - generate_key: Generate a new private key. 
- - Returns: - - """ - from_str = f"--from {from_file}" if from_file else "" - - return self._execute( - f"container nodes {from_str}", - **{param: value for param, value in locals().items() if param not in ["self", "from_file", "from_str"]}, - ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/control.py b/src/frostfs_testlib/cli/frostfs_cli/control.py deleted file mode 100644 index 957bca9..0000000 --- a/src/frostfs_testlib/cli/frostfs_cli/control.py +++ /dev/null @@ -1,232 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsCliControl(CliCommand): - def set_status( - self, - endpoint: str, - status: str, - wallet: Optional[str] = None, - force: Optional[bool] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Set status of the storage node in FrostFS network map - - Args: - wallet: Path to the wallet or binary key - address: Address of wallet account - endpoint: Remote node control address (as 'multiaddr' or ':') - force: Force turning to local maintenance - status: New netmap status keyword ('online', 'offline', 'maintenance') - timeout: Timeout for an operation (default 15s) - - Returns: - Command`s result. - """ - return self._execute( - "control set-status", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def healthcheck( - self, - endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Health check for FrostFS storage nodes - - Args: - wallet: Path to the wallet or binary key - address: Address of wallet account - endpoint: Remote node control address (as 'multiaddr' or ':') - timeout: Timeout for an operation (default 15s) - - Returns: - Command`s result. - """ - return self._execute( - "control healthcheck", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def drop_objects( - self, - endpoint: str, - objects: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Drop objects from the node's local storage - - Args: - wallet: Path to the wallet or binary key - address: Address of wallet account - endpoint: Remote node control address (as 'multiaddr' or ':') - objects: List of object addresses to be removed in string format - timeout: Timeout for an operation (default 15s) - - Returns: - Command`s result. 
- """ - return self._execute( - "control drop-objects", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def add_rule( - self, - endpoint: str, - chain_id: str, - target_name: str, - target_type: str, - rule: Optional[list[str]] = None, - path: Optional[str] = None, - chain_id_hex: Optional[bool] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Drop objects from the node's local storage - - Args: - address: Address of wallet account - chain-id: Assign ID to the parsed chain - chain-id-hex: Flag to parse chain ID as hex - endpoint: Remote node control address (as 'multiaddr' or ':') - path: Path to encoded chain in JSON or binary format - rule: Rule statement - target-name: Resource name in APE resource name format - target-type: Resource type(container/namespace) - timeout: Timeout for an operation (default 15s) - wallet: Path to the wallet or binary key - - Returns: - Command`s result. - """ - return self._execute( - "control add-rule", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def get_rule( - self, - endpoint: str, - chain_id: str, - target_name: str, - target_type: str, - chain_id_hex: Optional[bool] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Drop objects from the node's local storage - - Args: - address string Address of wallet account - chain-id string Chain id - chain-id-hex Flag to parse chain ID as hex - endpoint string Remote node control address (as 'multiaddr' or ':') - target-name string Resource name in APE resource name format - target-type string Resource type(container/namespace) - timeout duration Timeout for an operation (default 15s) - wallet string Path to the wallet or binary key - - Returns: - Command`s result. - """ - return self._execute( - "control get-rule", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def list_rules( - self, - endpoint: str, - target_name: str, - target_type: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Drop objects from the node's local storage - - Args: - address: Address of wallet account - endpoint: Remote node control address (as 'multiaddr' or ':') - target-name: Resource name in APE resource name format - target-type: Resource type(container/namespace) - timeout: Timeout for an operation (default 15s) - wallet: Path to the wallet or binary key - - Returns: - Command`s result. - """ - return self._execute( - "control list-rules", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def list_targets( - self, - endpoint: str, - chain_name: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Drop objects from the node's local storage - - Args: - address: Address of wallet account - chain-name: Chain name(ingress|s3) - endpoint: Remote node control address (as 'multiaddr' or ':') - timeout: Timeout for an operation (default 15s) - wallet: Path to the wallet or binary key - - Returns: - Command`s result. 
- """ - return self._execute( - "control list-targets", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def remove_rule( - self, - endpoint: str, - chain_id: str, - target_name: str, - target_type: str, - all: Optional[bool] = None, - chain_id_hex: Optional[bool] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Drop objects from the node's local storage - - Args: - address: Address of wallet account - all: Remove all chains - chain-id: Assign ID to the parsed chain - chain-id-hex: Flag to parse chain ID as hex - endpoint: Remote node control address (as 'multiaddr' or ':') - target-name: Resource name in APE resource name format - target-type: Resource type(container/namespace) - timeout: Timeout for an operation (default 15s) - wallet: Path to the wallet or binary key - - Returns: - Command`s result. - """ - return self._execute( - "control remove-rule", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/netmap.py b/src/frostfs_testlib/cli/frostfs_cli/netmap.py deleted file mode 100644 index cd197d3..0000000 --- a/src/frostfs_testlib/cli/frostfs_cli/netmap.py +++ /dev/null @@ -1,132 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsCliNetmap(CliCommand): - def epoch( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = False, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Get current epoch number. - - Args: - address: Address of wallet account. - generate_key: Generate new private key. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - ttl: TTL value in request meta header (default 2). - wallet: Path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for an operation (default 15s). - - Returns: - Command's result. - """ - return self._execute( - "netmap epoch", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def netinfo( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = False, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Get information about FrostFS network. - - Args: - address: Address of wallet account - generate_key: Generate new private key - rpc_endpoint: Remote node address (as 'multiaddr' or ':') - ttl: TTL value in request meta header (default 2) - wallet: Path to the wallet or binary key - xhdr: Request X-Headers in form of Key=Value - timeout: Timeout for an operation (default 15s). - - Returns: - Command's result. - """ - return self._execute( - "netmap netinfo", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def nodeinfo( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - json: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = False, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Get target node info. - - Args: - address: Address of wallet account. 
- generate_key: Generate new private key. - json: Print node info in JSON format. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - ttl: TTL value in request meta header (default 2). - wallet: Path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for an operation (default 15s). - - Returns: - Command's result. - """ - return self._execute( - "netmap nodeinfo", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def snapshot( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = False, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Request current local snapshot of the network map. - - Args: - address: Address of wallet account. - generate_key: Generate new private key. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - ttl: TTL value in request meta header (default 2). - wallet: Path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for an operation (default 15s). - - Returns: - Command's result. - """ - return self._execute( - "netmap snapshot", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py deleted file mode 100644 index e536544..0000000 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ /dev/null @@ -1,458 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsCliObject(CliCommand): - def delete( - self, - rpc_endpoint: str, - cid: str, - oid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Delete object from FrostFS. - - Args: - address: Address of wallet account. - bearer: File with signed JSON or binary encoded bearer token. - cid: Container ID. - generate_key: Generate new private key. - oid: Object ID. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - session: Filepath to a JSON- or binary-encoded token of the object DELETE session. - ttl: TTL value in request meta header (default 2). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for the operation (default 15s). - - Returns: - Command's result. - """ - return self._execute( - "object delete", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def get( - self, - rpc_endpoint: str, - cid: str, - oid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - file: Optional[str] = None, - header: Optional[str] = None, - no_progress: bool = False, - raw: bool = False, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Get object from FrostFS. - - Args: - address: Address of wallet account. - bearer: File with signed JSON or binary encoded bearer token. - cid: Container ID. - file: File to write object payload to. 
Default: stdout. - generate_key: Generate new private key. - header: File to write header to. Default: stdout. - no_progress: Do not show progress bar. - oid: Object ID. - raw: Set raw request option. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - session: Filepath to a JSON- or binary-encoded token of the object GET session. - ttl: TTL value in request meta header (default 2). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for the operation (default 15s). - - Returns: - Command's result. - """ - return self._execute( - "object get", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def hash( - self, - rpc_endpoint: str, - cid: str, - oid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - range: Optional[str] = None, - salt: Optional[str] = None, - ttl: Optional[int] = None, - session: Optional[str] = None, - hash_type: Optional[str] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Get object hash. - - Args: - address: Address of wallet account. - bearer: File with signed JSON or binary encoded bearer token. - cid: Container ID. - generate_key: Generate new private key. - oid: Object ID. - range: Range to take hash from in the form offset1:length1,... - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - salt: Salt in hex format. - ttl: TTL value in request meta header (default 2). - session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session. - hash_type: Hash type. Either 'sha256' or 'tz' (default "sha256"). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for the operation (default 15s). - - Returns: - Command's result. - """ - return self._execute( - "object hash", - **{param: value for param, value in locals().items() if param not in ["self", "params"]}, - ) - - def head( - self, - rpc_endpoint: str, - cid: str, - oid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - file: Optional[str] = None, - json_mode: bool = False, - main_only: bool = False, - proto: bool = False, - raw: bool = False, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Get object header. - - Args: - address: Address of wallet account. - bearer: File with signed JSON or binary encoded bearer token. - cid: Container ID. - file: File to write object payload to. Default: stdout. - generate_key: Generate new private key. - json_mode: Marshal output in JSON. - main_only: Return only main fields. - oid: Object ID. - proto: Marshal output in Protobuf. - raw: Set raw request option. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - session: Filepath to a JSON- or binary-encoded token of the object HEAD session. - ttl: TTL value in request meta header (default 2). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for the operation (default 15s). - - Returns: - Command's result. 
- """ - return self._execute( - "object head", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def lock( - self, - rpc_endpoint: str, - cid: str, - oid: str, - wallet: Optional[str] = None, - lifetime: Optional[int] = None, - expire_at: Optional[int] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Lock object in container. - - Args: - address: Address of wallet account. - bearer: File with signed JSON or binary encoded bearer token. - cid: Container ID. - generate_key: Generate new private key. - oid: Object ID. - lifetime: Lock lifetime. - expire_at: Lock expiration epoch. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - session: Filepath to a JSON- or binary-encoded token of the object PUT session. - ttl: TTL value in request meta header (default 2). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for the operation (default 15s). - - Returns: - Command's result. - """ - return self._execute( - "object lock", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def put( - self, - rpc_endpoint: str, - cid: str, - file: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - attributes: Optional[dict] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - copies_number: Optional[int] = None, - disable_filename: bool = False, - disable_timestamp: bool = False, - expire_at: Optional[int] = None, - no_progress: bool = False, - notify: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Put object to FrostFS. - - Args: - address: Address of wallet account. - attributes: User attributes in form of Key1=Value1,Key2=Value2. - bearer: File with signed JSON or binary encoded bearer token. - copies_number: Number of copies of the object to store within the RPC call. - cid: Container ID. - disable_filename: Do not set well-known filename attribute. - disable_timestamp: Do not set well-known timestamp attribute. - expire_at: Last epoch in the life of the object. - file: File with object payload. - generate_key: Generate new private key. - no_progress: Do not show progress bar. - notify: Object notification in the form of *epoch*:*topic*; '-' - topic means using default. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - session: Filepath to a JSON- or binary-encoded token of the object PUT session. - ttl: TTL value in request meta header (default 2). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for the operation (default 15s). - - Returns: - Command's result. 
- """ - return self._execute( - "object put", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def patch( - self, - rpc_endpoint: str, - cid: str, - oid: str, - range: list[str] = None, - payload: list[str] = None, - new_attrs: Optional[str] = None, - replace_attrs: bool = False, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - trace: bool = False, - ttl: Optional[int] = None, - wallet: Optional[str] = None, - xhdr: Optional[dict] = None, - ) -> CommandResult: - """ - PATCH an object. - - Args: - rpc_endpoint: Remote node address (as 'multiaddr' or ':') - cid: Container ID - oid: Object ID - range: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2] - payload: An array of file paths to be applied in each range - new_attrs: Attributes to be changed in the format Key1=Value1,Key2=Value2 - replace_attrs: Replace all attributes completely with new ones specified in new_attrs - address: Address of wallet account - bearer: File with signed JSON or binary encoded bearer token - generate_key: Generate new private key - session: Filepath to a JSON- or binary-encoded token of the object RANGE session - timeout: Timeout for the operation - trace: Generate trace ID and print it - ttl: TTL value in request meta header (default 2) - wallet: WIF (NEP-2) string or path to the wallet or binary key - xhdr: Dict with request X-Headers - - Returns: - Command's result. - """ - return self._execute( - "object patch", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def range( - self, - rpc_endpoint: str, - cid: str, - oid: str, - range: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - file: Optional[str] = None, - json_mode: bool = False, - raw: bool = False, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Get payload range data of an object. - - Args: - address: Address of wallet account. - bearer: File with signed JSON or binary encoded bearer token. - cid: Container ID. - file: File to write object payload to. Default: stdout. - generate_key: Generate new private key. - json_mode: Marshal output in JSON. - oid: Object ID. - range: Range to take data from in the form offset:length. - raw: Set raw request option. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - session: Filepath to a JSON- or binary-encoded token of the object RANGE session. - ttl: TTL value in request meta header (default 2). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for the operation (default 15s). - - Returns: - Command's result. 
- """ - return self._execute( - "object range", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def search( - self, - rpc_endpoint: str, - cid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - filters: Optional[list] = None, - oid: Optional[str] = None, - phy: bool = False, - root: bool = False, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Search object. - - Args: - address: Address of wallet account. - bearer: File with signed JSON or binary encoded bearer token. - cid: Container ID. - filters: Repeated filter expressions or files with protobuf JSON. - generate_key: Generate new private key. - oid: Object ID. - phy: Search physically stored objects. - root: Search for user objects. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - session: Filepath to a JSON- or binary-encoded token of the object SEARCH session. - ttl: TTL value in request meta header (default 2). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for the operation (default 15s). - - Returns: - Command's result. - """ - return self._execute( - "object search", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def nodes( - self, - rpc_endpoint: str, - cid: str, - oid: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Search object nodes. - - Args: - address: Address of wallet account. - bearer: File with signed JSON or binary encoded bearer token. - cid: Container ID. - generate_key: Generate new private key. - oid: Object ID. - trace: Generate trace ID and print it. - root: Search for user objects. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - verify_presence_all: Verify the actual presence of the object on all netmap nodes. - ttl: TTL value in request meta header (default 2). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for the operation (default 15s). - - Returns: - Command's result. - """ - return self._execute( - "object nodes", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/session.py b/src/frostfs_testlib/cli/frostfs_cli/session.py deleted file mode 100644 index 857b13e..0000000 --- a/src/frostfs_testlib/cli/frostfs_cli/session.py +++ /dev/null @@ -1,35 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsCliSession(CliCommand): - def create( - self, - rpc_endpoint: str, - wallet: str, - out: str, - lifetime: Optional[int] = None, - address: Optional[str] = None, - json: Optional[bool] = False, - ) -> CommandResult: - """ - Create session token. - - Args: - address: Address of wallet account. - out: File to write session token to. - lifetime: Number of epochs for token to stay valid. - json: Output token in JSON. 
- wallet: WIF (NEP-2) string or path to the wallet or binary key. - wallet_password: Wallet password. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - - Returns: - Command's result. - """ - return self._execute( - "session create", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py deleted file mode 100644 index 68a2f54..0000000 --- a/src/frostfs_testlib/cli/frostfs_cli/shards.py +++ /dev/null @@ -1,261 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsCliShards(CliCommand): - def flush_cache( - self, - endpoint: str, - wallet: str, - wallet_password: str, - id: Optional[list[str]], - address: Optional[str] = None, - all: bool = False, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Flush objects from the write-cache to the main storage. - - Args: - address: Address of wallet account. - id: List of shard IDs in base58 encoding. - all: Process all shards. - endpoint: Remote node address (as 'multiaddr' or ':'). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - wallet_password: Wallet password. - timeout: Timeout for an operation (default 15s). - - Returns: - Command's result. - """ - return self._execute_with_password( - f"control shards flush-cache", - wallet_password, - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def set_mode( - self, - endpoint: str, - mode: str, - id: Optional[list[str]] = None, - wallet: Optional[str] = None, - wallet_password: Optional[str] = None, - address: Optional[str] = None, - all: bool = False, - clear_errors: bool = False, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Set work mode of the shard. - - Args: - address: Address of wallet account. - id: List of shard IDs in base58 encoding. - mode: New shard mode ('degraded-read-only', 'read-only', 'read-write'). - all: Process all shards. - clear_errors: Set shard error count to 0. - endpoint: Remote node address (as 'multiaddr' or ':'). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - wallet_password: Wallet password. - timeout: Timeout for an operation (default 15s). - - Returns: - Command's result. - """ - if not wallet_password: - return self._execute( - "control shards set-mode", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - return self._execute_with_password( - "control shards set-mode", - wallet_password, - **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]}, - ) - - def dump( - self, - endpoint: str, - wallet: str, - wallet_password: str, - id: str, - path: str, - address: Optional[str] = None, - no_errors: bool = False, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Dump objects from shard to a file. - - Args: - address: Address of wallet account. - no_errors: Skip invalid/unreadable objects. - id: Shard ID in base58 encoding. - path: File to write objects to. - endpoint: Remote node address (as 'multiaddr' or ':'). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - wallet_password: Wallet password. - timeout: Timeout for an operation (default 15s). - - Returns: - Command's result. 
- """ - return self._execute_with_password( - "control shards dump", - wallet_password, - **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]}, - ) - - def list( - self, - endpoint: str, - wallet: Optional[str] = None, - wallet_password: Optional[str] = None, - address: Optional[str] = None, - json_mode: bool = False, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - List shards of the storage node. - - Args: - address: Address of wallet account. - json_mode: Print shard info as a JSON array. - endpoint: Remote node address (as 'multiaddr' or ':'). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - wallet_password: Wallet password. - timeout: Timeout for an operation (default 15s). - - Returns: - Command's result. - """ - if not wallet_password: - return self._execute( - "control shards list", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - return self._execute_with_password( - "control shards list", - wallet_password, - **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]}, - ) - - def evacuation_start( - self, - endpoint: str, - id: Optional[str] = None, - scope: Optional[str] = None, - all: bool = False, - no_errors: bool = True, - await_mode: bool = False, - address: Optional[str] = None, - timeout: Optional[str] = None, - no_progress: bool = False, - ) -> CommandResult: - """ - Objects evacuation from shard to other shards. - - Args: - address: Address of wallet account - all: Process all shards - await: Block execution until evacuation is completed - endpoint: Remote node control address (as 'multiaddr' or ':') - id: List of shard IDs in base58 encoding - no_errors: Skip invalid/unreadable objects (default true) - no_progress: Print progress if await provided - scope: Evacuation scope; possible values: trees, objects, all (default "all") - timeout: Timeout for an operation (default 15s) - - Returns: - Command's result. - """ - return self._execute( - "control shards evacuation start", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def evacuation_reset( - self, - endpoint: str, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Reset evacuate objects from shard to other shards status. - - Args: - address: Address of wallet account - endpoint: Remote node control address (as 'multiaddr' or ':') - timeout: Timeout for an operation (default 15s) - Returns: - Command's result. - """ - return self._execute( - "control shards evacuation reset", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def evacuation_stop( - self, - endpoint: str, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Stop running evacuate process from shard to other shards. - - Args: - address: Address of wallet account - endpoint: Remote node control address (as 'multiaddr' or ':') - timeout: Timeout for an operation (default 15s) - - Returns: - Command's result. - """ - return self._execute( - "control shards evacuation stop", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def evacuation_status( - self, - endpoint: str, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Get evacuate objects from shard to other shards status. 
- - Args: - address: Address of wallet account - endpoint: Remote node control address (as 'multiaddr' or ':') - timeout: Timeout for an operation (default 15s) - - Returns: - Command's result. - """ - return self._execute( - "control shards evacuation status", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def detach(self, endpoint: str, address: Optional[str] = None, id: Optional[str] = None, timeout: Optional[str] = None): - """ - Detach and close the shards - - Args: - address: Address of wallet account - endpoint: Remote node control address (as 'multiaddr' or ':') - id: List of shard IDs in base58 encoding - timeout: Timeout for an operation (default 15s) - - Returns: - Command's result. - """ - return self._execute( - "control shards detach", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/storagegroup.py b/src/frostfs_testlib/cli/frostfs_cli/storagegroup.py deleted file mode 100644 index 8fb22ce..0000000 --- a/src/frostfs_testlib/cli/frostfs_cli/storagegroup.py +++ /dev/null @@ -1,155 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsCliStorageGroup(CliCommand): - def put( - self, - rpc_endpoint: str, - wallet: str, - cid: str, - members: list[str], - ttl: Optional[int] = None, - bearer: Optional[str] = None, - lifetime: Optional[int] = None, - address: Optional[str] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Put storage group to FrostFS. - - Args: - address: Address of wallet account. - bearer: File with signed JSON or binary encoded bearer token. - cid: Container ID. - members: ID list of storage group members. - lifetime: Storage group lifetime in epochs. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - ttl: TTL value in request meta header. - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for an operation (default 15s). - - Returns: - Command's result. - """ - members = ",".join(members) - return self._execute( - "storagegroup put", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def get( - self, - rpc_endpoint: str, - wallet: str, - cid: str, - id: str, - raw: Optional[bool] = False, - ttl: Optional[int] = None, - bearer: Optional[str] = None, - lifetime: Optional[int] = None, - address: Optional[str] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Get storage group from FrostFS. - - Args: - address: Address of wallet account. - bearer: File with signed JSON or binary encoded bearer token. - cid: Container ID. - id: Storage group identifier. - raw: Set raw request option. - lifetime: Storage group lifetime in epochs. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - ttl: TTL value in request meta header. - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for an operation (default 15s). - - Returns: - Command's result. 
- """ - return self._execute( - "storagegroup get", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def list( - self, - rpc_endpoint: str, - wallet: str, - cid: str, - raw: Optional[bool] = False, - ttl: Optional[int] = None, - bearer: Optional[str] = None, - lifetime: Optional[int] = None, - address: Optional[str] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - List storage groups in FrostFS container. - - Args: - address: Address of wallet account. - bearer: File with signed JSON or binary encoded bearer token. - cid: Container ID. - raw: Set raw request option. - lifetime: Storage group lifetime in epochs. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - ttl: TTL value in request meta header. - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for an operation (default 15s). - - Returns: - Command's result. - """ - return self._execute( - "storagegroup list", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def delete( - self, - rpc_endpoint: str, - wallet: str, - cid: str, - id: str, - raw: Optional[bool] = False, - ttl: Optional[int] = None, - bearer: Optional[str] = None, - lifetime: Optional[int] = None, - address: Optional[str] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Delete storage group from FrostFS. - - Args: - address: Address of wallet account. - bearer: File with signed JSON or binary encoded bearer token. - cid: Container ID. - id: Storage group identifier. - raw: Set raw request option. - lifetime: Storage group lifetime in epochs. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - ttl: TTL value in request meta header. - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for an operation (default 15s). - - Returns: - Command's result. - """ - return self._execute( - "storagegroup delete", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/tree.py b/src/frostfs_testlib/cli/frostfs_cli/tree.py deleted file mode 100644 index c75b526..0000000 --- a/src/frostfs_testlib/cli/frostfs_cli/tree.py +++ /dev/null @@ -1,53 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsCliTree(CliCommand): - def healthcheck( - self, - wallet: Optional[str] = None, - rpc_endpoint: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Get internal balance of FrostFS account - - Args: - address: Address of wallet account. - owner: Owner of balance account (omit to use owner from private key). - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - - Returns: - Command's result. - - """ - return self._execute( - "tree healthcheck", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def list( - self, - cid: str, - rpc_endpoint: Optional[str] = None, - wallet: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Get Tree List - - Args: - cid: Container ID. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - wallet: WIF (NEP-2) string or path to the wallet or binary key. 
- timeout: duration Timeout for the operation (default 15 s) - - Returns: - Command's result. - - """ - return self._execute( - "tree list", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/util.py b/src/frostfs_testlib/cli/frostfs_cli/util.py deleted file mode 100644 index 37347a5..0000000 --- a/src/frostfs_testlib/cli/frostfs_cli/util.py +++ /dev/null @@ -1,64 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsCliUtil(CliCommand): - def sign_bearer_token( - self, - from_file: str, - to_file: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - json: Optional[bool] = False, - ) -> CommandResult: - """ - Sign bearer token to use it in requests. - - Args: - address: Address of wallet account. - from_file: File with JSON or binary encoded bearer token to sign. - to_file: File to dump signed bearer token (default: binary encoded). - json: Dump bearer token in JSON encoding. - wallet: WIF (NEP-2) string or path to the wallet or binary key. - - Returns: - Command's result. - """ - return self._execute( - "util sign bearer-token", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def sign_session_token( - self, - from_file: str, - to_file: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - ) -> CommandResult: - """ - Sign session token to use it in requests. - - Args: - address: Address of wallet account. - from_file: File with JSON encoded session token to sign. - to_file: File to dump signed bearer token (default: binary encoded). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - - Returns: - Command's result. - """ - return self._execute( - "util sign session-token", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def convert_eacl(self, from_file: str, to_file: str, json: Optional[bool] = False, ape: Optional[bool] = False): - """Convert representation of extended ACL table.""" - - return self._execute( - "util convert eacl", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/version.py b/src/frostfs_testlib/cli/frostfs_cli/version.py deleted file mode 100644 index 9d22859..0000000 --- a/src/frostfs_testlib/cli/frostfs_cli/version.py +++ /dev/null @@ -1,13 +0,0 @@ -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsCliVersion(CliCommand): - def get(self) -> CommandResult: - """ - Application version and FrostFS API compatibility. - - Returns: - Command's result. 
- """ - return self._execute("", version=True) diff --git a/src/frostfs_testlib/cli/generic_cli.py b/src/frostfs_testlib/cli/generic_cli.py deleted file mode 100644 index 2a80159..0000000 --- a/src/frostfs_testlib/cli/generic_cli.py +++ /dev/null @@ -1,30 +0,0 @@ -from typing import Optional - -from frostfs_testlib.hosting.interfaces import Host -from frostfs_testlib.shell.interfaces import CommandOptions, Shell - - -class GenericCli(object): - def __init__(self, cli_name: str, host: Host) -> None: - self.host = host - self.cli_name = cli_name - - def __call__( - self, - args: Optional[str] = "", - pipes: Optional[str] = "", - shell: Optional[Shell] = None, - options: Optional[CommandOptions] = None, - ): - if not shell: - shell = self.host.get_shell() - - cli_config = self.host.get_cli_config(self.cli_name, True) - extra_args = "" - exec_path = self.cli_name - if cli_config: - extra_args = " ".join(cli_config.extra_args) - exec_path = cli_config.exec_path - - cmd = f"{exec_path} {args} {extra_args} {pipes}" - return shell.exec(cmd, options) diff --git a/src/frostfs_testlib/cli/neogo/__init__.py b/src/frostfs_testlib/cli/neogo/__init__.py deleted file mode 100644 index 43d305b..0000000 --- a/src/frostfs_testlib/cli/neogo/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from frostfs_testlib.cli.neogo.go import NeoGo -from frostfs_testlib.cli.neogo.network_type import NetworkType diff --git a/src/frostfs_testlib/cli/neogo/candidate.py b/src/frostfs_testlib/cli/neogo/candidate.py deleted file mode 100644 index e4bf6b7..0000000 --- a/src/frostfs_testlib/cli/neogo/candidate.py +++ /dev/null @@ -1,134 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class NeoGoCandidate(CliCommand): - def register( - self, - address: str, - rpc_endpoint: str, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - wallet_password: Optional[str] = None, - gas: Optional[float] = None, - timeout: int = 10, - ) -> CommandResult: - """Register as a new candidate. - - Args: - address: Address to register. - wallet: Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config: Target location of the wallet config file; conflicts with --wallet flag. - wallet_password: Wallet password. - gas: Network fee to add to the transaction (prioritizing it). - rpc_endpoint: RPC node address. - timeout: Timeout for the operation (default: 10s). - - Returns: - Command's result. - """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - exec_param = { - param: param_value - for param, param_value in locals().items() - if param not in ["self", "wallet_password"] - } - exec_param["timeout"] = f"{timeout}s" - if wallet_password is not None: - return self._execute_with_password( - "wallet candidate register", wallet_password, **exec_param - ) - if wallet_config: - return self._execute("wallet candidate register", **exec_param) - - raise Exception(self.WALLET_PASSWD_ERROR_MSG) - - def unregister( - self, - address: str, - rpc_endpoint: str, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - wallet_password: Optional[str] = None, - gas: Optional[float] = None, - timeout: int = 10, - ) -> CommandResult: - """Unregister self as a candidate. - - Args: - address: Address to unregister. - wallet: Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. 
- wallet_config: Target location of the wallet config file; conflicts with --wallet flag. - wallet_password: Wallet password. - gas: Network fee to add to the transaction (prioritizing it). - rpc_endpoint: RPC node address. - timeout: Timeout for the operation (default: 10s). - - Returns: - Command's result. - """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - exec_param = { - param: param_value - for param, param_value in locals().items() - if param not in ["self", "wallet_password"] - } - exec_param["timeout"] = f"{timeout}s" - if wallet_password is not None: - return self._execute_with_password( - "wallet candidate unregister", wallet_password, **exec_param - ) - if wallet_config: - return self._execute("wallet candidate unregister", **exec_param) - - raise Exception(self.WALLET_PASSWD_ERROR_MSG) - - def vote( - self, - address: str, - candidate: str, - rpc_endpoint: str, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - wallet_password: Optional[str] = None, - gas: Optional[float] = None, - timeout: int = 10, - ) -> CommandResult: - """Votes for a validator. - - Voting happens by calling "vote" method of a NEO native contract. Do not provide - candidate argument to perform unvoting. - - Args: - address: Address to vote from - candidate: Public key of candidate to vote for. - wallet: Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config: Target location of the wallet config file; conflicts with --wallet flag. - wallet_password: Wallet password. - gas: Network fee to add to the transaction (prioritizing it). - rpc_endpoint: RPC node address. - timeout: Timeout for the operation (default: 10s). - - Returns: - Command's result. - """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - exec_param = { - param: param_value - for param, param_value in locals().items() - if param not in ["self", "wallet_password"] - } - exec_param["timeout"] = f"{timeout}s" - if wallet_password is not None: - return self._execute_with_password( - "wallet candidate vote", wallet_password, **exec_param - ) - if wallet_config: - return self._execute("wallet candidate vote", **exec_param) - - raise Exception(self.WALLET_PASSWD_ERROR_MSG) diff --git a/src/frostfs_testlib/cli/neogo/contract.py b/src/frostfs_testlib/cli/neogo/contract.py deleted file mode 100644 index bc56dd9..0000000 --- a/src/frostfs_testlib/cli/neogo/contract.py +++ /dev/null @@ -1,398 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class NeoGoContract(CliCommand): - def compile( - self, - input_file: str, - out: str, - manifest: str, - config: str, - no_standards: bool = False, - no_events: bool = False, - no_permissions: bool = False, - bindings: Optional[str] = None, - ) -> CommandResult: - """Compile a smart contract to a .nef file. - - Args: - input_file: Input file for the smart contract to be compiled. - out: Output of the compiled contract. - manifest: Emit contract manifest (*.manifest.json) file into separate file using - configuration input file (*.yml). - config: Configuration input file (*.yml). - no_standards: Do not check compliance with supported standards. - no_events: Do not check emitted events with the manifest. - no_permissions: Do not check if invoked contracts are allowed in manifest. - bindings: Output file for smart-contract bindings configuration. - - Returns: - Command's result. 
- """ - return self._execute( - "contract compile", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def deploy( - self, - address: str, - input_file: str, - manifest: str, - rpc_endpoint: str, - sysgas: Optional[float] = None, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - wallet_password: Optional[str] = None, - gas: Optional[float] = None, - out: Optional[str] = None, - force: bool = False, - timeout: int = 10, - ) -> CommandResult: - """Deploy a smart contract (.nef with description) - - Args: - wallet: Wallet to use to get the key for transaction signing; - conflicts with wallet_config. - wallet_config: Path to wallet config to use to get the key for transaction signing; - conflicts with wallet. - wallet_password: Wallet password. - address: Address to use as transaction signee (and gas source). - gas: Network fee to add to the transaction (prioritizing it). - sysgas: System fee to add to transaction (compensating for execution). - out: File to put JSON transaction to. - force: Do not ask for a confirmation. - rpc_endpoint: RPC node address. - timeout: Timeout for the operation (default: 10s). - input_file: Input file for the smart contract (*.nef). - manifest: Emit contract manifest (*.manifest.json) file into separate file using - configuration input file (*.yml). - - Returns: - Command's result. - """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - exec_param = { - param: param_value - for param, param_value in locals().items() - if param not in ["self", "wallet_password"] - } - exec_param["timeout"] = f"{timeout}s" - - if wallet_password is not None: - return self._execute_with_password( - "contract deploy", - wallet_password, - **exec_param, - ) - if wallet_config: - return self._execute( - "contract deploy", - **exec_param, - ) - - raise Exception(self.WALLET_PASSWD_ERROR_MSG) - - def generate_wrapper( - self, - out: str, - hash: str, - config: Optional[str] = None, - manifest: Optional[str] = None, - ) -> CommandResult: - """Generate wrapper to use in other contracts. - - Args: - config: Configuration file to use. - manifest: Read contract manifest (*.manifest.json) file. - out: Output of the compiled contract. - hash: Smart-contract hash. - - Returns: - Command's result. - """ - return self._execute( - "contract generate-wrapper", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def invokefunction( - self, - scripthash: str, - address: Optional[str] = None, - wallet: Optional[str] = None, - method: Optional[str] = None, - arguments: Optional[str] = None, - multisig_hash: Optional[str] = None, - wallet_config: Optional[str] = None, - wallet_password: Optional[str] = None, - gas: Optional[float] = None, - sysgas: Optional[float] = None, - out: Optional[str] = None, - force: bool = False, - rpc_endpoint: Optional[str] = None, - timeout: int = 10, - ) -> CommandResult: - """Executes given (as a script hash) deployed script. - - Script is executed with the given method, arguments and signers. Sender is included in - the list of signers by default with None witness scope. If you'd like to change default - sender's scope, specify it via signers parameter. See testinvokefunction documentation - for the details about parameters. It differs from testinvokefunction in that this command - sends an invocation transaction to the network. - - Args: - scripthash: Function hash. - method: Call method. 
- arguments: Method arguments. - multisig_hash: Multisig hash. - wallet: Wallet to use to get the key for transaction signing; - conflicts with wallet_config. - wallet_config: Path to wallet config to use to get the key for transaction signing; - conflicts with wallet. - wallet_password: Wallet password. - address: Address to use as transaction signee (and gas source). - gas: Network fee to add to the transaction (prioritizing it). - sysgas: System fee to add to transaction (compensating for execution). - out: File to put JSON transaction to. - force: Force-push the transaction in case of bad VM state after test script invocation. - rpc_endpoint: RPC node address. - timeout: Timeout for the operation (default: 10s). - - Returns: - Command's result. - """ - - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - - multisig_hash = f"-- {multisig_hash}" or "" - post_data = f"{scripthash} {method or ''} {arguments or ''} {multisig_hash}" - exec_param = { - param: param_value - for param, param_value in locals().items() - if param - not in [ - "self", - "scripthash", - "method", - "arguments", - "multisig_hash", - "wallet_password", - ] - } - exec_param["timeout"] = f"{timeout}s" - exec_param["post_data"] = post_data - if wallet_password is not None: - return self._execute_with_password( - "contract invokefunction", wallet_password, **exec_param - ) - if wallet_config: - return self._execute("contract invokefunction", **exec_param) - - raise Exception(self.WALLET_PASSWD_ERROR_MSG) - - def testinvokefunction( - self, - scripthash: str, - wallet: Optional[str] = None, - wallet_password: Optional[str] = None, - method: Optional[str] = None, - arguments: Optional[str] = None, - multisig_hash: Optional[str] = None, - rpc_endpoint: Optional[str] = None, - timeout: int = 10, - ) -> CommandResult: - """Executes given (as a script hash) deployed script. - - Script is executed with the given method, arguments and signers (sender is not included - by default). If no method is given "" is passed to the script, if no arguments are given, - an empty array is passed, if no signers are given no array is passed. If signers are - specified, the first one of them is treated as a sender. All of the given arguments are - encapsulated into array before invoking the script. The script thus should follow the - regular convention of smart contract arguments (method string and an array of other - arguments). - See more information and samples in `neo-go contract testinvokefunction --help`. - - Args: - scripthash: Function hash. - wallet: Wallet to use for testinvoke. - wallet_password: Wallet password. - method: Call method. - arguments: Method arguments. - multisig_hash: Multisig hash. - rpc_endpoint: RPC node address. - timeout: Timeout for the operation (default: 10s). - - Returns: - Command's result. 
- """ - multisig_hash = f"-- {multisig_hash}" if multisig_hash else "" - post_data = f"{scripthash} {method or ''} {arguments or ''} {multisig_hash}" - exec_param = { - param: param_value - for param, param_value in locals().items() - if param - not in [ - "self", - "scripthash", - "method", - "arguments", - "multisig_hash", - "wallet_password", - ] - } - exec_param["timeout"] = f"{timeout}s" - exec_param["post_data"] = post_data - if wallet_password is not None: - return self._execute_with_password( - "contract testinvokefunction", wallet_password, **exec_param - ) - - return self._execute("contract testinvokefunction", **exec_param) - - def testinvokescript( - self, - input_file: str, - rpc_endpoint: Optional[str] = None, - timeout: int = 10, - ) -> CommandResult: - """Executes given compiled AVM instructions in NEF format. - - Instructions are executed with the given set of signers not including sender by default. - See testinvokefunction documentation for the details about parameters. - - Args: - input_file: Input location of the .nef file that needs to be invoked. - rpc_endpoint: RPC node address. - timeout: Timeout for the operation (default: 10s). - - Returns: - Command's result. - """ - exec_param = { - param: param_value for param, param_value in locals().items() if param not in ["self"] - } - exec_param["timeout"] = f"{timeout}s" - return self._execute( - "contract testinvokescript", - **exec_param, - ) - - def init(self, name: str, skip_details: bool = False) -> CommandResult: - """Initialize a new smart-contract in a directory with boiler plate code. - - Args: - name: Name of the smart-contract to be initialized. - skip_details: Skip filling in the projects and contract details. - - Returns: - Command's result. - """ - return self._execute( - "contract init", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def inspect( - self, - input_file: Optional[str] = None, - compile: Optional[str] = None, - ) -> CommandResult: - """Creates a user readable dump of the program instructions. - - Args: - input_file: Input file of the program (either .go or .nef). - compile: Compile input file (it should be go code then). - - Returns: - Command's result. - """ - return self._execute( - "contract inspect", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def calc_hash( - self, - input_file: str, - manifest: str, - sender: Optional[str] = None, - ) -> CommandResult: - """Calculates hash of a contract after deployment. - - Args: - input_file: Path to NEF file. - sender: Sender script hash or address. - manifest: Path to manifest file. - - Returns: - Command's result. - """ - return self._execute( - "contract calc-hash", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def add_group( - self, - manifest: str, - address: str, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - wallet_password: Optional[str] = None, - sender: Optional[str] = None, - nef: Optional[str] = None, - ) -> CommandResult: - """Adds group to the manifest. - - Args: - wallet: Wallet to use to get the key for transaction signing; - conflicts with wallet_config. - wallet_config: Path to wallet config to use to get the key for transaction signing; - conflicts with wallet. - wallet_password: Wallet password. - sender: Deploy transaction sender. - address: Account to sign group with. - nef: Path to the NEF file. 
- manifest: Path to the manifest. - - Returns: - Command's result. - """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - exec_param = { - param: param_value - for param, param_value in locals().items() - if param not in ["self", "wallet_password"] - } - if wallet_password is not None: - return self._execute_with_password( - "contract manifest add-group", wallet_password, **exec_param - ) - if wallet_config: - return self._execute("contract manifest add-group", **exec_param) - - raise Exception(self.WALLET_PASSWD_ERROR_MSG) diff --git a/src/frostfs_testlib/cli/neogo/db.py b/src/frostfs_testlib/cli/neogo/db.py deleted file mode 100644 index 4b456c3..0000000 --- a/src/frostfs_testlib/cli/neogo/db.py +++ /dev/null @@ -1,69 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.cli.neogo.network_type import NetworkType -from frostfs_testlib.shell import CommandResult - - -class NeoGoDb(CliCommand): - def dump( - self, - config_path: str, - out: str, - network: NetworkType = NetworkType.PRIVATE, - count: int = 0, - start: int = 0, - ) -> CommandResult: - """Dump blocks (starting with block #1) to the file. - - Args: - config_path: Path to config. - network: Select network type (default: private). - count: Number of blocks to be processed (default or 0: all chain) (default: 0). - start: Block number to start from (default: 0) (default: 0). - out: Output file (stdout if not given). - - Returns: - Command's result. - """ - return self._execute( - "db dump", - **{network.value: True}, - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def restore( - self, - config_path: str, - input_file: str, - network: NetworkType = NetworkType.PRIVATE, - count: int = 0, - dump: Optional[str] = None, - incremental: bool = False, - ) -> CommandResult: - """Dump blocks (starting with block #1) to the file. - - Args: - config_path: Path to config. - network: Select network type (default: private). - count: Number of blocks to be processed (default or 0: all chain) (default: 0). - input_file: Input file (stdin if not given). - dump: Directory for storing JSON dumps. - incremental: Use if dump is incremental. - - Returns: - Command's result. 
- """ - return self._execute( - "db restore", - **{network.value: True}, - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) diff --git a/src/frostfs_testlib/cli/neogo/go.py b/src/frostfs_testlib/cli/neogo/go.py deleted file mode 100644 index 9e7286c..0000000 --- a/src/frostfs_testlib/cli/neogo/go.py +++ /dev/null @@ -1,37 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.neogo.candidate import NeoGoCandidate -from frostfs_testlib.cli.neogo.contract import NeoGoContract -from frostfs_testlib.cli.neogo.db import NeoGoDb -from frostfs_testlib.cli.neogo.nep17 import NeoGoNep17 -from frostfs_testlib.cli.neogo.node import NeoGoNode -from frostfs_testlib.cli.neogo.query import NeoGoQuery -from frostfs_testlib.cli.neogo.version import NeoGoVersion -from frostfs_testlib.cli.neogo.wallet import NeoGoWallet -from frostfs_testlib.shell import Shell - - -class NeoGo: - candidate: NeoGoCandidate - contract: NeoGoContract - db: NeoGoDb - nep17: NeoGoNep17 - node: NeoGoNode - query: NeoGoQuery - version: NeoGoVersion - wallet: NeoGoWallet - - def __init__( - self, - shell: Shell, - neo_go_exec_path: str, - config_path: Optional[str] = None, - ): - self.candidate = NeoGoCandidate(shell, neo_go_exec_path, config_path=config_path) - self.contract = NeoGoContract(shell, neo_go_exec_path, config_path=config_path) - self.db = NeoGoDb(shell, neo_go_exec_path, config_path=config_path) - self.nep17 = NeoGoNep17(shell, neo_go_exec_path, config_path=config_path) - self.node = NeoGoNode(shell, neo_go_exec_path, config_path=config_path) - self.query = NeoGoQuery(shell, neo_go_exec_path, config_path=config_path) - self.version = NeoGoVersion(shell, neo_go_exec_path, config_path=config_path) - self.wallet = NeoGoWallet(shell, neo_go_exec_path, config_path=config_path) diff --git a/src/frostfs_testlib/cli/neogo/nep17.py b/src/frostfs_testlib/cli/neogo/nep17.py deleted file mode 100644 index a3dcb12..0000000 --- a/src/frostfs_testlib/cli/neogo/nep17.py +++ /dev/null @@ -1,240 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class NeoGoNep17(CliCommand): - def balance( - self, - address: str, - token: str, - rpc_endpoint: str, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - timeout: int = 10, - ) -> CommandResult: - """Get address balance. - - Args: - address: Address to use. - wallet: Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config: Target location of the wallet config file; conflicts with --wallet flag. - token: Token to use (hash or name (for NEO/GAS or imported tokens)). - rpc_endpoint: RPC node address. - timeout: Timeout for the operation (default: 10s). - - Returns: - Command's result. - """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - exec_param = { - param: param_value for param, param_value in locals().items() if param not in ["self"] - } - exec_param["timeout"] = f"{timeout}s" - return self._execute( - "wallet nep17 balance", - **exec_param, - ) - - def import_token( - self, - address: str, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - token: Optional[str] = None, - rpc_endpoint: Optional[str] = None, - timeout: int = 10, - ) -> CommandResult: - """Import NEP-17 token to a wallet. - - Args: - address: Token contract address or hash in LE. 
- wallet: Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config: Target location of the wallet config file; conflicts with --wallet flag. - token: Token to use (hash or name (for NEO/GAS or imported tokens)). - rpc_endpoint: RPC node address. - timeout: Timeout for the operation (default: 10s). - - Returns: - Command's result. - """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - exec_param = { - param: param_value for param, param_value in locals().items() if param not in ["self"] - } - exec_param["timeout"] = f"{timeout}s" - return self._execute( - "wallet nep17 import", - **exec_param, - ) - - def info( - self, - token: str, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - ) -> CommandResult: - """Print imported NEP-17 token info. - - Args: - wallet: Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config: Target location of the wallet config file; conflicts with --wallet flag. - token: Token to use (hash or name (for NEO/GAS or imported tokens)). - - Returns: - Command's result. - """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - - return self._execute( - "wallet nep17 info", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def remove( - self, - token: str, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - force: bool = False, - ) -> CommandResult: - """Remove NEP-17 token from the wallet. - - Args: - wallet: Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config: Target location of the wallet config file; conflicts with --wallet flag. - token: Token to use (hash or name (for NEO/GAS or imported tokens)). - force: Do not ask for a confirmation. - - Returns: - Command's result. - """ - return self._execute( - "wallet nep17 remove", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def transfer( - self, - token: str, - to_address: str, - rpc_endpoint: str, - sysgas: Optional[float] = None, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - wallet_password: Optional[str] = None, - out: Optional[str] = None, - from_address: Optional[str] = None, - force: bool = False, - gas: Optional[float] = None, - amount: float = 0, - timeout: int = 10, - ) -> CommandResult: - """Transfers specified NEP-17 token amount. - - Transfer is executed with optional 'data' parameter and cosigners list attached to the - transfer. See 'contract testinvokefunction' documentation for the details about 'data' - parameter and cosigners syntax. If no 'data' is given then default nil value will be used. - If no cosigners are given then the sender with CalledByEntry scope will be used as the only - signer. - - Args: - wallet: Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config: Target location of the wallet config file; conflicts with --wallet flag. - wallet_password: Wallet password. - out: File to put JSON transaction to. - from_address: Address to send an asset from. - to_address: Address to send an asset to. - token: Token to use (hash or name (for NEO/GAS or imported tokens)). - force: Do not ask for a confirmation. - gas: Network fee to add to the transaction (prioritizing it). 
- sysgas: System fee to add to transaction (compensating for execution). - force: Do not ask for a confirmation. - amount: Amount of asset to send. - rpc_endpoint: RPC node address. - timeout: Timeout for the operation (default: 10s). - - Returns: - Command's result. - """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - exec_param = { - param: param_value - for param, param_value in locals().items() - if param not in ["self", "wallet_password"] - } - exec_param["timeout"] = f"{timeout}s" - - if wallet_password is not None: - return self._execute_with_password( - "wallet nep17 transfer", - wallet_password, - **exec_param, - ) - if wallet_config: - return self._execute( - "wallet nep17 transfer", - **exec_param, - ) - - raise Exception(self.WALLET_PASSWD_ERROR_MSG) - - def multitransfer( - self, - token: str, - to_address: list[str], - sysgas: float, - rpc_endpoint: str, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - out: Optional[str] = None, - from_address: Optional[str] = None, - force: bool = False, - gas: Optional[float] = None, - amount: float = 0, - timeout: int = 10, - ) -> CommandResult: - """Transfer NEP-17 tokens to multiple recipients. - - Args: - wallet: Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config: Target location of the wallet config file; conflicts with --wallet flag. - out: File to put JSON transaction to. - from_address: Address to send an asset from. - to_address: Address to send an asset to. - token: Token to use (hash or name (for NEO/GAS or imported tokens)). - force: Do not ask for a confirmation. - gas: Network fee to add to the transaction (prioritizing it). - sysgas: System fee to add to transaction (compensating for execution). - force: Do not ask for a confirmation. - amount: Amount of asset to send. - rpc_endpoint: RPC node address. - timeout: Timeout for the operation (default: 10s). - - Returns: - Command's result. - """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - exec_param = { - param: param_value for param, param_value in locals().items() if param not in ["self"] - } - exec_param["timeout"] = f"{timeout}s" - return self._execute( - "wallet nep17 multitransfer", - **exec_param, - ) diff --git a/src/frostfs_testlib/cli/neogo/network_type.py b/src/frostfs_testlib/cli/neogo/network_type.py deleted file mode 100644 index 9129f88..0000000 --- a/src/frostfs_testlib/cli/neogo/network_type.py +++ /dev/null @@ -1,7 +0,0 @@ -from enum import Enum - - -class NetworkType(Enum): - PRIVATE = "privnet" - MAIN = "mainnet" - TEST = "testnet" diff --git a/src/frostfs_testlib/cli/neogo/node.py b/src/frostfs_testlib/cli/neogo/node.py deleted file mode 100644 index ccc833d..0000000 --- a/src/frostfs_testlib/cli/neogo/node.py +++ /dev/null @@ -1,16 +0,0 @@ -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.cli.neogo.network_type import NetworkType -from frostfs_testlib.shell import CommandResult - - -class NeoGoNode(CliCommand): - def start(self, network: NetworkType = NetworkType.PRIVATE) -> CommandResult: - """Start a NEO node. - - Args: - network: Select network type (default: private). - - Returns: - Command's result. 
- """ - return self._execute("start", **{network.value: True}) diff --git a/src/frostfs_testlib/cli/neogo/query.py b/src/frostfs_testlib/cli/neogo/query.py deleted file mode 100644 index 1422daf..0000000 --- a/src/frostfs_testlib/cli/neogo/query.py +++ /dev/null @@ -1,100 +0,0 @@ -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class NeoGoQuery(CliCommand): - def candidates(self, rpc_endpoint: str, timeout: str = "10s") -> CommandResult: - """Get candidates and votes. - - Args: - rpc_endpoint: RPC node address. - timeout: Timeout for the operation (default: 10s). - - Returns: - Command's result. - """ - return self._execute( - "query candidates", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def committee(self, rpc_endpoint: str, timeout: str = "10s") -> CommandResult: - """Get committee list. - - Args: - rpc_endpoint: RPC node address. - timeout: Timeout for the operation (default: 10s). - - Returns: - Command's result. - """ - return self._execute( - "query committee", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def height(self, rpc_endpoint: str, timeout: str = "10s") -> CommandResult: - """Get node height. - - Args: - rpc_endpoint: RPC node address. - timeout: Timeout for the operation (default: 10s). - - Returns: - Command's result. - """ - return self._execute( - "query height", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def tx(self, tx_hash: str, rpc_endpoint: str, timeout: str = "10s") -> CommandResult: - """Query transaction status. - - Args: - tx_hash: Hash of transaction. - rpc_endpoint: RPC node address. - timeout: Timeout for the operation (default: 10s). - - Returns: - Command's result. - """ - return self._execute( - f"query tx {tx_hash}", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self", "tx_hash"] - }, - ) - - def voter(self, rpc_endpoint: str, timeout: str = "10s") -> CommandResult: - """Print NEO holder account state. - - Args: - rpc_endpoint: RPC node address. - timeout: Timeout for the operation (default: 10s). - - Returns: - Command's result. - """ - return self._execute( - "query voter", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) diff --git a/src/frostfs_testlib/cli/neogo/version.py b/src/frostfs_testlib/cli/neogo/version.py deleted file mode 100644 index 3f6ce3f..0000000 --- a/src/frostfs_testlib/cli/neogo/version.py +++ /dev/null @@ -1,12 +0,0 @@ -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class NeoGoVersion(CliCommand): - def get(self) -> CommandResult: - """Application version. - - Returns: - Command's result. - """ - return self._execute("", version=True) diff --git a/src/frostfs_testlib/cli/neogo/wallet.py b/src/frostfs_testlib/cli/neogo/wallet.py deleted file mode 100644 index 2f3e518..0000000 --- a/src/frostfs_testlib/cli/neogo/wallet.py +++ /dev/null @@ -1,381 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class NeoGoWallet(CliCommand): - def claim( - self, - address: str, - rpc_endpoint: str, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - timeout: int = 10, - ) -> CommandResult: - """Claim GAS. 
- - Args: - address: Address to claim GAS for. - wallet: Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config: Target location of the wallet config file; conflicts with --wallet flag. - rpc_endpoint: RPC node address. - timeout: Timeout for the operation (default: 10s). - - Returns: - Command's result. - """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - exec_param = { - param: param_value for param, param_value in locals().items() if param not in ["self"] - } - exec_param["timeout"] = f"{timeout}s" - return self._execute( - "wallet claim", - **exec_param, - ) - - def init( - self, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - account: bool = False, - ) -> CommandResult: - """Create a new wallet. - - Args: - wallet: Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config: Target location of the wallet config file; conflicts with --wallet flag. - account: Create a new account. - - Returns: - Command's result. - """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - - return self._execute( - "wallet init", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def convert( - self, - out: str, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - ) -> CommandResult: - """Convert addresses from existing NEO2 NEP6-wallet to NEO3 format. - - Args: - wallet: Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config: Target location of the wallet config file; conflicts with --wallet flag. - out: Where to write converted wallet. - - Returns: - Command's result. - """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - - return self._execute( - "wallet convert", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def create( - self, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - ) -> CommandResult: - """Add an account to the existing wallet. - - Args: - wallet: Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config: Target location of the wallet config file; conflicts with --wallet flag. - - Returns: - Command's result. - """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - - return self._execute( - "wallet create", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def dump( - self, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - decrypt: bool = False, - ) -> CommandResult: - """Check and dump an existing NEO wallet. - - Args: - wallet: Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config: Target location of the wallet config file; conflicts with --wallet flag. - decrypt: Decrypt encrypted keys. - - Returns: - Command's result. 
- """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - - return self._execute( - "wallet dump", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def dump_keys( - self, - address: Optional[str] = None, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - ) -> CommandResult: - """Check and dump an existing NEO wallet. - - Args: - wallet: Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config: Target location of the wallet config file; conflicts with --wallet flag. - address: Address to print public keys for. - - Returns: - Command's result. - """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - - return self._execute( - "wallet dump-keys", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def export( - self, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - decrypt: bool = False, - ) -> CommandResult: - """Export keys for address. - - Args: - wallet: Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config: Target location of the wallet config file; conflicts with --wallet flag. - decrypt: Decrypt encrypted keys. - - Returns: - Command's result. - """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - - return self._execute( - "wallet export", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def import_wif( - self, - wif: str, - name: str, - contract: str, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - ) -> CommandResult: - """Import WIF of a standard signature contract. - - Args: - wallet: Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config: Target location of the wallet config file; conflicts with --wallet flag. - wif: WIF to import. - name: Optional account name. - contract: Verification script for custom contracts. - - Returns: - Command's result. - """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - - return self._execute( - "wallet import", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def import_multisig( - self, - wif: str, - name: Optional[str] = None, - min_number: int = 0, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - ) -> CommandResult: - """Import multisig contract. - - Args: - wallet: Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config: Target location of the wallet config file; conflicts with --wallet flag. - wif: WIF to import. - name: Optional account name. - min_number: Minimal number of signatures (default: 0). - - Returns: - Command's result. - """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - - return self._execute( - "wallet import-multisig", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def import_deployed( - self, - wif: str, - rpc_endpoint: str, - name: Optional[str] = None, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - contract: Optional[str] = None, - timeout: int = 10, - ) -> CommandResult: - """Import deployed contract. 
- - Args: - wallet: Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config: Target location of the wallet config file; conflicts with --wallet flag. - wif: WIF to import. - name: Optional account name. - contract: Contract hash or address. - rpc_endpoint: RPC node address. - timeout: Timeout for the operation (default: 10s). - - Returns: - Command's result. - """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - exec_param = { - param: param_value for param, param_value in locals().items() if param not in ["self"] - } - exec_param["timeout"] = f"{timeout}s" - return self._execute( - "wallet import-deployed", - **exec_param, - ) - - def remove( - self, - address: str, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - force: bool = False, - ) -> CommandResult: - """Remove an account from the wallet. - - Args: - wallet: Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config: Target location of the wallet config file; conflicts with --wallet flag. - address: Account address or hash in LE form to be removed. - force: Do not ask for a confirmation. - - Returns: - Command's result. - """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - - return self._execute( - "wallet remove", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) - - def sign( - self, - input_file: str, - address: str, - rpc_endpoint: Optional[str] = None, - wallet: Optional[str] = None, - wallet_config: Optional[str] = None, - wallet_password: Optional[str] = None, - out: Optional[str] = None, - timeout: int = 10, - ) -> CommandResult: - """Cosign transaction with multisig/contract/additional account. - - Signs the given (in the input file) context (which must be a transaction signing context) - for the given address using the given wallet. This command can output the resulting JSON - (with additional signature added) right to the console (if no output file and no RPC - endpoint specified) or into a file (which can be the same as input one). If an RPC endpoint - is given it'll also try to construct a complete transaction and send it via RPC (printing - its hash if everything is OK). - - Args: - wallet: Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config: Target location of the wallet config file; conflicts with --wallet flag. - wallet_password: Wallet password. - out: File to put JSON transaction to. - input_file: File with JSON transaction. - address: Address to use. - rpc_endpoint: RPC node address. - timeout: Timeout for the operation (default: 10s). - - Returns: - Command's result. 
- """ - assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - exec_param = { - param: param_value - for param, param_value in locals().items() - if param not in ["self", "wallet_password"] - } - exec_param["timeout"] = f"{timeout}s" - if wallet_password is not None: - return self._execute_with_password("wallet sign", wallet_password, **exec_param) - - if wallet_config: - return self._execute("wallet sign", **exec_param) - - raise Exception(self.WALLET_PASSWD_ERROR_MSG) diff --git a/src/frostfs_testlib/cli/netmap_parser.py b/src/frostfs_testlib/cli/netmap_parser.py deleted file mode 100644 index 4b4a501..0000000 --- a/src/frostfs_testlib/cli/netmap_parser.py +++ /dev/null @@ -1,102 +0,0 @@ -import re - -from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeInfo, NodeNetInfo, NodeNetmapInfo, NodeStatus - - -class NetmapParser: - @staticmethod - def netinfo(output: str) -> NodeNetInfo: - regexes = { - "epoch": r"Epoch: (?P\d+)", - "network_magic": r"Network magic: (?P.*$)", - "time_per_block": r"Time per block: (?P\d+\w+)", - "container_fee": r"Container fee: (?P\d+)", - "epoch_duration": r"Epoch duration: (?P\d+)", - "inner_ring_candidate_fee": r"Inner Ring candidate fee: (?P\d+)", - "maximum_object_size": r"Maximum object size: (?P\d+)", - "maximum_count_of_data_shards": r"Maximum count of data shards: (?P\d+)", - "maximum_count_of_parity_shards": r"Maximum count of parity shards: (?P\d+)", - "withdrawal_fee": r"Withdrawal fee: (?P\d+)", - "homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?Ptrue|false)", - "maintenance_mode_allowed": r"Maintenance mode allowed: (?Ptrue|false)", - } - parse_result = {} - - for key, regex in regexes.items(): - search_result = re.search(regex, output, flags=re.MULTILINE) - if search_result == None: - parse_result[key] = None - continue - parse_result[key] = search_result[key].strip() - - node_netinfo = NodeNetInfo(**parse_result) - - return node_netinfo - - @staticmethod - def snapshot_all_nodes(output: str) -> list[NodeNetmapInfo]: - """The code will parse each line and return each node as dataclass.""" - netmap_nodes = output.split("Node ")[1:] - dataclasses_netmap = [] - result_netmap = {} - - regexes = { - "node_id": r"\d+: (?P\w+)", - "node_data_ips": r"(?P/ip4/.+?)$", - "node_status": r"(?PONLINE|MAINTENANCE|OFFLINE)", - "cluster_name": r"ClusterName: (?P\w+)", - "continent": r"Continent: (?P\w+)", - "country": r"Country: (?P\w+)", - "country_code": r"CountryCode: (?P\w+)", - "external_address": r"ExternalAddr: (?P/ip[4].+?)$", - "location": r"Location: (?P\w+.*)", - "node": r"Node: (?P\d+\.\d+\.\d+\.\d+)", - "price": r"Price: (?P\d+)", - "sub_div": r"SubDiv: (?P.*)", - "sub_div_code": r"SubDivCode: (?P\w+)", - "un_locode": r"UN-LOCODE: (?P\w+.*)", - "role": r"role: (?P\w+)", - } - - for node in netmap_nodes: - for key, regex in regexes.items(): - search_result = re.search(regex, node, flags=re.MULTILINE) - if search_result is None: - result_netmap[key] = None - continue - if key == "node_data_ips": - result_netmap[key] = search_result[key].strip().split(" ") - continue - if key == "external_address": - result_netmap[key] = search_result[key].strip().split(",") - continue - if key == "node_status": - result_netmap[key] = NodeStatus(search_result[key].strip().lower()) - continue - result_netmap[key] = search_result[key].strip() - - dataclasses_netmap.append(NodeNetmapInfo(**result_netmap)) - - return dataclasses_netmap - - 
@staticmethod - def snapshot_one_node(output: str, rpc_endpoint: str) -> NodeNetmapInfo | None: - snapshot_nodes = NetmapParser.snapshot_all_nodes(output=output) - for snapshot in snapshot_nodes: - for endpoint in snapshot.external_address: - if rpc_endpoint.split(":")[0] in endpoint: - return snapshot - - @staticmethod - def node_info(output: dict) -> NodeInfo: - data_dict = {"attributes": {}} - - for key, value in output.items(): - if key != "attributes": - data_dict[key] = value - - for attribute in output["attributes"]: - data_dict["attributes"][attribute["key"]] = attribute["value"] - - return NodeInfo(**data_dict) diff --git a/src/frostfs_testlib/clients/__init__.py b/src/frostfs_testlib/clients/__init__.py deleted file mode 100644 index e46766b..0000000 --- a/src/frostfs_testlib/clients/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from frostfs_testlib.clients.http.http_client import HttpClient -from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient -from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper -from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper -from frostfs_testlib.clients.s3.s3_http_client import S3HttpClient diff --git a/src/frostfs_testlib/clients/http/__init__.py b/src/frostfs_testlib/clients/http/__init__.py deleted file mode 100644 index ab6e2b0..0000000 --- a/src/frostfs_testlib/clients/http/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from frostfs_testlib.clients.http.http_client import HttpClient diff --git a/src/frostfs_testlib/clients/http/http_client.py b/src/frostfs_testlib/clients/http/http_client.py deleted file mode 100644 index 16d7707..0000000 --- a/src/frostfs_testlib/clients/http/http_client.py +++ /dev/null @@ -1,152 +0,0 @@ -import io -import json -import logging -import logging.config -from typing import Mapping, Sequence - -import httpx - -from frostfs_testlib import reporter - -timeout = httpx.Timeout(60, read=150) -LOGGING_CONFIG = { - "disable_existing_loggers": False, - "version": 1, - "handlers": {"default": {"class": "logging.StreamHandler", "formatter": "http", "stream": "ext://sys.stderr"}}, - "formatters": { - "http": { - "format": "%(asctime)s [%(levelname)s] %(name)s - %(message)s", - "datefmt": "%Y-%m-%d %H:%M:%S", - } - }, - "loggers": { - "httpx": { - "handlers": ["default"], - "level": "ERROR", - }, - "httpcore": { - "handlers": ["default"], - "level": "ERROR", - }, - }, -} - -logging.config.dictConfig(LOGGING_CONFIG) -logger = logging.getLogger("NeoLogger") - - -class HttpClient: - @reporter.step("Send {method} request to {url}") - def send(self, method: str, url: str, expected_status_code: int = None, **kwargs: dict) -> httpx.Response: - transport = httpx.HTTPTransport(verify=False, retries=5) - client = httpx.Client(timeout=timeout, transport=transport) - response = client.request(method, url, **kwargs) - - self._attach_response(response, **kwargs) - # logger.info(f"Response: {response.status_code} => {response.text}") - - if expected_status_code: - assert ( - response.status_code == expected_status_code - ), f"Got {response.status_code} response code while {expected_status_code} expected" - - return response - - @classmethod - def _parse_body(cls, readable: httpx.Request | httpx.Response) -> str | None: - try: - content = readable.read() - except Exception as e: - logger.warning(f"Unable to read file: {str(e)}") - return None - - if not content: - return None - - request_body = None - - try: - request_body = json.loads(content) - except (json.JSONDecodeError, UnicodeDecodeError) as e: - 
logger.warning(f"Unable to convert body to json: {str(e)}") - - if request_body is not None: - return json.dumps(request_body, default=str, indent=4) - - try: - request_body = content.decode() - except UnicodeDecodeError as e: - logger.warning(f"Unable to decode binary data to text using UTF-8 encoding: {str(e)}") - - request_body = content if request_body is None else request_body - request_body = "" if len(request_body) > 1000 else request_body - - return request_body - - @classmethod - def _parse_files(cls, files: Mapping | Sequence | None) -> dict: - filepaths = {} - - if not files: - return filepaths - - if isinstance(files, Sequence): - items = files - elif isinstance(files, Mapping): - items = files.items() - else: - raise TypeError(f"'files' must be either Sequence or Mapping, got: {type(files).__name__}") - - for name, file in items: - if isinstance(file, io.IOBase): - filepaths[name] = file.name - elif isinstance(file, Sequence): - filepaths[name] = file[1].name - - return filepaths - - @classmethod - def _attach_response(cls, response: httpx.Response, **kwargs): - request = response.request - request_headers = json.dumps(dict(request.headers), default=str, indent=4) - request_body = cls._parse_body(request) - - files = kwargs.get("files") - request_files = cls._parse_files(files) - - response_headers = json.dumps(dict(response.headers), default=str, indent=4) - response_body = cls._parse_body(response) - - report = ( - f"Method: {request.method}\n\n" - + f"URL: {request.url}\n\n" - + f"Request Headers: {request_headers}\n\n" - + (f"Request Body: {request_body}\n\n" if request_body else "") - + (f"Request Files: {request_files}\n\n" if request_files else "") - + f"Response Status Code: {response.status_code}\n\n" - + f"Response Headers: {response_headers}\n\n" - + (f"Response Body: {response_body}\n\n" if response_body else "") - ) - curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body, request_files) - - reporter.attach(report, "Requests Info") - reporter.attach(curl_request, "CURL") - cls._write_log(curl_request, response_body, response.status_code) - - @classmethod - def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict) -> str: - excluded_headers = {"Accept-Encoding", "Connection", "User-Agent", "Content-Length"} - headers = " ".join(f"-H '{header.title()}: {value}'" for header, value in headers.items() if header.title() not in excluded_headers) - - data = f" -d '{data}'" if data else "" - for name, path in files.items(): - data += f' -F "{name}=@{path}"' - - # Option -k means no verify SSL - return f"curl {url} -X {method} {headers}{data} -k" - - @classmethod - def _write_log(cls, curl: str, res_body: str, res_code: int) -> None: - if res_body: - curl += f"\nResponse: {res_code}\n{res_body}" - logger.info(f"{curl}") diff --git a/src/frostfs_testlib/clients/s3/__init__.py b/src/frostfs_testlib/clients/s3/__init__.py deleted file mode 100644 index 5481f48..0000000 --- a/src/frostfs_testlib/clients/s3/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient -from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper -from frostfs_testlib.clients.s3.interfaces import ACL, BucketContainerResolver, S3ClientWrapper, VersioningStatus diff --git a/src/frostfs_testlib/clients/s3/aws_cli_client.py b/src/frostfs_testlib/clients/s3/aws_cli_client.py deleted file mode 100644 index c1dd6b6..0000000 --- 
a/src/frostfs_testlib/clients/s3/aws_cli_client.py +++ /dev/null @@ -1,1548 +0,0 @@ -import json -import logging -import os -from datetime import datetime -from time import sleep -from typing import Literal, Optional, Union - -from frostfs_testlib import reporter -from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict -from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME -from frostfs_testlib.shell import CommandOptions -from frostfs_testlib.shell.local_shell import LocalShell -from frostfs_testlib.utils import string_utils - -# TODO: Refactor this code to use shell instead of _cmd_run -from frostfs_testlib.utils.cli_utils import _configure_aws_cli -from frostfs_testlib.utils.file_utils import TestFile - -logger = logging.getLogger("NeoLogger") -command_options = CommandOptions(timeout=480) - - -class AwsCliClient(S3ClientWrapper): - __repr_name__: str = "AWS CLI" - - # Flags that we use for all S3 commands: disable SSL verification (as we use self-signed - # certificate in devenv) and disable automatic pagination in CLI output - common_flags = "--no-verify-ssl --no-paginate" - s3gate_endpoint: str - - @reporter.step("Configure S3 client (aws cli)") - def __init__( - self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" - ) -> None: - self.s3gate_endpoint = s3gate_endpoint - self.iam_endpoint = None - - self.access_key_id: str = access_key_id - self.secret_access_key: str = secret_access_key - self.profile = profile - self.region = region - - self.local_shell = LocalShell() - try: - _configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key, region) - self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS} --profile {profile}") - self.local_shell.exec( - f"aws configure set retry_mode {RETRY_MODE} --profile {profile}", - ) - except Exception as err: - raise RuntimeError("Error while configuring AwsCliClient") from err - - @reporter.step("Set S3 endpoint to {s3gate_endpoint}") - def set_endpoint(self, s3gate_endpoint: str): - self.s3gate_endpoint = s3gate_endpoint - - @reporter.step("Set IAM endpoint to {iam_endpoint}") - def set_iam_endpoint(self, iam_endpoint: str): - self.iam_endpoint = iam_endpoint - - @reporter.step("Create bucket S3") - def create_bucket( - self, - bucket: Optional[str] = None, - object_lock_enabled_for_bucket: Optional[bool] = None, - acl: Optional[str] = None, - grant_write: Optional[str] = None, - grant_read: Optional[str] = None, - grant_full_control: Optional[str] = None, - location_constraint: Optional[str] = None, - ) -> str: - if bucket is None: - bucket = string_utils.unique_name("bucket-") - - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - if object_lock_enabled_for_bucket is None: - object_lock = "" - elif object_lock_enabled_for_bucket: - object_lock = " --object-lock-enabled-for-bucket" - else: - object_lock = " --no-object-lock-enabled-for-bucket" - cmd = ( - f"aws {self.common_flags} s3api create-bucket --bucket {bucket} " - f"{object_lock} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - if acl: - cmd += f" --acl {acl}" - if grant_full_control: - cmd += f" --grant-full-control {grant_full_control}" - if grant_write: - cmd += f" --grant-write {grant_write}" - if grant_read: - cmd += f" --grant-read {grant_read}" - if location_constraint: - cmd += f" --create-bucket-configuration 
LocationConstraint={location_constraint}" - self.local_shell.exec(cmd) - - return bucket - - @reporter.step("List buckets S3") - def list_buckets(self) -> list[str]: - cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint} --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - buckets_json = self._to_json(output) - return [bucket["Name"] for bucket in buckets_json["Buckets"]] - - @reporter.step("Delete bucket S3") - def delete_bucket(self, bucket: str) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - self.local_shell.exec(cmd, command_options) - - @reporter.step("Head bucket S3") - def head_bucket(self, bucket: str) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - self.local_shell.exec(cmd) - - @reporter.step("Put bucket versioning status") - def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api put-bucket-versioning --bucket {bucket} " - f"--versioning-configuration Status={status.value} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Get bucket versioning status") - def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api get-bucket-versioning --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("Status") - - @reporter.step("Put bucket tagging") - def put_bucket_tagging(self, bucket: str, tags: list) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - tags_json = {"TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]} - cmd = ( - f"aws {self.common_flags} s3api put-bucket-tagging --bucket {bucket} " - f"--tagging '{json.dumps(tags_json)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Get bucket tagging") - def get_bucket_tagging(self, bucket: str) -> list: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("TagSet") - - @reporter.step("Get bucket acl") - def get_bucket_acl(self, bucket: str) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - return self._to_json(output) - - @reporter.step("Get bucket location") - def get_bucket_location(self, bucket: str) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api get-bucket-location --bucket 
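# Illustrative sketch (not part of the removed file): nearly every AwsCliClient method above
# first wraps the bucket name in quotes when it starts with "-" or contains a space, so the
# generated aws CLI command stays parseable, and then appends the endpoint and profile. A
# standalone rendition of that quoting plus command assembly (endpoint and profile values are
# made up):
def quote_bucket(bucket: str) -> str:
    return f'"{bucket}"' if bucket.startswith("-") or " " in bucket else bucket


def head_bucket_cmd(bucket: str, endpoint: str = "https://s3.frostfs.devenv:8080", profile: str = "default") -> str:
    common_flags = "--no-verify-ssl --no-paginate"
    return f"aws {common_flags} s3api head-bucket --bucket {quote_bucket(bucket)} --endpoint {endpoint} --profile {profile}"


print(head_bucket_cmd("my bucket"))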
{bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("LocationConstraint") - - @reporter.step("List objects S3") - def list_objects( - self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None - ) -> Union[dict, list[str]]: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} " - if page_size: - cmd = cmd.replace("--no-paginate", "") - cmd += f" --page-size {page_size} " - if prefix: - cmd += f" --prefix {prefix}" - if self.profile: - cmd += f" --profile {self.profile} " - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - obj_list = [obj["Key"] for obj in response.get("Contents", [])] - logger.info(f"Found s3 objects: {obj_list}") - - return response if full_output else obj_list - - @reporter.step("List objects S3 v2") - def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - obj_list = [obj["Key"] for obj in response.get("Contents", [])] - logger.info(f"Found s3 objects: {obj_list}") - - return response if full_output else obj_list - - @reporter.step("List objects versions S3") - def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response if full_output else response.get("Versions", []) - - @reporter.step("List objects delete markers S3") - def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response if full_output else response.get("DeleteMarkers", []) - - @reporter.step("Copy object S3") - def copy_object( - self, - source_bucket: str, - source_key: str, - bucket: Optional[str] = None, - key: Optional[str] = None, - acl: Optional[str] = None, - metadata_directive: Optional[Literal["COPY", "REPLACE"]] = None, - metadata: Optional[dict] = None, - tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None, - tagging: Optional[str] = None, - ) -> str: - if bucket is None: - bucket = source_bucket - - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - if key is None: - key = string_utils.unique_name("copy-object-") - - copy_source = f"{source_bucket}/{source_key}" - - cmd = ( - f"aws {self.common_flags} s3api copy-object --copy-source {copy_source} " - f"--bucket {bucket} --key {key} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - if acl: - cmd += f" --acl {acl}" - if metadata_directive: - cmd += f" 
--metadata-directive {metadata_directive}" - if metadata: - cmd += " --metadata " - for meta_key, value in metadata.items(): - cmd += f" {meta_key}={value}" - if tagging_directive: - cmd += f" --tagging-directive {tagging_directive}" - if tagging: - cmd += f" --tagging {tagging}" - self.local_shell.exec(cmd, command_options) - return key - - @reporter.step("Put object S3") - def put_object( - self, - bucket: str, - filepath: str, - key: Optional[str] = None, - metadata: Optional[dict] = None, - tagging: Optional[str] = None, - acl: Optional[str] = None, - object_lock_mode: Optional[str] = None, - object_lock_retain_until_date: Optional[datetime] = None, - object_lock_legal_hold_status: Optional[str] = None, - grant_full_control: Optional[str] = None, - grant_read: Optional[str] = None, - ) -> str: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - if key is None: - key = os.path.basename(filepath) - - cmd = ( - f"aws {self.common_flags} s3api put-object --bucket {bucket} --key {key} " - f"--body {filepath} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - if metadata: - cmd += " --metadata" - for key, value in metadata.items(): - cmd += f" {key}={value}" - if tagging: - cmd += f" --tagging '{tagging}'" - if acl: - cmd += f" --acl {acl}" - if object_lock_mode: - cmd += f" --object-lock-mode {object_lock_mode}" - if object_lock_retain_until_date: - cmd += f' --object-lock-retain-until-date "{object_lock_retain_until_date}"' - if object_lock_legal_hold_status: - cmd += f" --object-lock-legal-hold-status {object_lock_legal_hold_status}" - if grant_full_control: - cmd += f" --grant-full-control '{grant_full_control}'" - if grant_read: - cmd += f" --grant-read {grant_read}" - output = self.local_shell.exec(cmd, command_options).stdout - response = self._to_json(output) - return response.get("VersionId") - - @reporter.step("Head object S3") - def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - version = f" --version-id {version_id}" if version_id else "" - cmd = ( - f"aws {self.common_flags} s3api head-object --bucket {bucket} --key {key} " - f"{version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response - - @reporter.step("Get object S3") - def get_object( - self, - bucket: str, - key: str, - version_id: Optional[str] = None, - object_range: Optional[tuple[int, int]] = None, - full_output: bool = False, - ) -> dict | TestFile: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, string_utils.unique_name("dl-object-"))) - version = f" --version-id {version_id}" if version_id else "" - cmd = ( - f"aws {self.common_flags} s3api get-object --bucket {bucket} --key {key} " - f"{version} {test_file} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - if object_range: - cmd += f" --range bytes={object_range[0]}-{object_range[1]}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response if full_output else test_file - - @reporter.step("Get object ACL") - def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - version = f" --version-id {version_id}" if version_id else "" - cmd = ( - f"aws 
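# Illustrative sketch (not part of the removed file): put_object above builds one long
# s3api put-object command and appends optional flags only when the corresponding argument
# is set. The same flag-accumulation pattern in isolation, with invented bucket, key and
# file values:
def put_object_cmd(bucket: str, key: str, filepath: str, metadata: dict | None = None, tagging: str | None = None) -> str:
    cmd = f"aws --no-verify-ssl --no-paginate s3api put-object --bucket {bucket} --key {key} --body {filepath}"
    if metadata:
        cmd += " --metadata"
        for meta_key, value in metadata.items():
            cmd += f" {meta_key}={value}"
    if tagging:
        cmd += f" --tagging '{tagging}'"
    return cmd


print(put_object_cmd("bucket-1", "obj-1", "/tmp/file.bin", metadata={"team": "qa"}, tagging="env=dev"))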
{self.common_flags} s3api get-object-acl --bucket {bucket} --key {key} " - f"{version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("Grants") - - @reporter.step("Put object ACL") - def put_object_acl( - self, - bucket: str, - key: str, - acl: Optional[str] = None, - grant_write: Optional[str] = None, - grant_read: Optional[str] = None, - ) -> list: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api put-object-acl --bucket {bucket} --key {key} " - f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - if acl: - cmd += f" --acl {acl}" - if grant_write: - cmd += f" --grant-write {grant_write}" - if grant_read: - cmd += f" --grant-read {grant_read}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("Grants") - - @reporter.step("Put bucket ACL") - def put_bucket_acl( - self, - bucket: str, - acl: Optional[str] = None, - grant_write: Optional[str] = None, - grant_read: Optional[str] = None, - ) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " - f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - if acl: - cmd += f" --acl {acl}" - if grant_write: - cmd += f" --grant-write {grant_write}" - if grant_read: - cmd += f" --grant-read {grant_read}" - self.local_shell.exec(cmd) - - @reporter.step("Delete objects S3") - def delete_objects(self, bucket: str, keys: list[str]) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - file_path = os.path.join(os.getcwd(), ASSETS_DIR, "delete.json") - delete_structure = json.dumps(_make_objs_dict(keys)) - with open(file_path, "w") as out_file: - out_file.write(delete_structure) - logger.info(f"Input file for delete-objects: {delete_structure}") - - cmd = ( - f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " - f"--delete file://{file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd, command_options).stdout - response = self._to_json(output) - return response - - @reporter.step("Delete object S3") - def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - version = f" --version-id {version_id}" if version_id else "" - cmd = ( - f"aws {self.common_flags} s3api delete-object --bucket {bucket} " - f"--key {key} {version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd, command_options).stdout - return self._to_json(output) - - @reporter.step("Delete object versions S3") - def delete_object_versions(self, bucket: str, object_versions: list) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - # Build deletion list in S3 format - delete_list = { - "Objects": [ - { - "Key": object_version["Key"], - "VersionId": object_version["VersionId"], - } - for object_version in object_versions - ] - } - - file_path = os.path.join(os.getcwd(), ASSETS_DIR, "delete.json") - delete_structure = json.dumps(delete_list) - with open(file_path, "w") as out_file: - out_file.write(delete_structure) - logger.info(f"Input file for delete-objects: {delete_structure}") - - cmd = ( - f"aws {self.common_flags} s3api delete-objects 
--bucket {bucket} " - f"--delete file://{file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd, command_options).stdout - return self._to_json(output) - - @reporter.step("Delete object versions S3 without delete markers") - def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - # Delete objects without creating delete markers - for object_version in object_versions: - self.delete_object(bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"]) - - @reporter.step("Get object attributes") - def get_object_attributes( - self, - bucket: str, - key: str, - attributes: list[str], - version_id: str = "", - max_parts: int = 0, - part_number: int = 0, - full_output: bool = True, - ) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - attrs = ",".join(attributes) - version = f" --version-id {version_id}" if version_id else "" - parts = f"--max-parts {max_parts}" if max_parts else "" - part_number_str = f"--part-number-marker {part_number}" if part_number else "" - cmd = ( - f"aws {self.common_flags} s3api get-object-attributes --bucket {bucket} " - f"--key {key} {version} {parts} {part_number_str} --object-attributes {attrs} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - for attr in attributes: - assert attr in response, f"Expected attribute {attr} in {response}" - - if full_output: - return response - else: - return response.get(attributes[0]) - - @reporter.step("Get bucket policy") - def get_bucket_policy(self, bucket: str) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("Policy") - - @reporter.step("Delete bucket policy") - def delete_bucket_policy(self, bucket: str) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api delete-bucket-policy --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response - - @reporter.step("Put bucket policy") - def put_bucket_policy(self, bucket: str, policy: dict) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - # Leaving it as is was in test repo. 
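# Illustrative sketch (not part of the removed file): delete_objects above serializes the
# deletion list into a delete.json file and passes it to the CLI via file://. Assuming
# _make_objs_dict produces the plain-key variant of the payload (the versioned variant shown
# in delete_object_versions adds VersionId), the shape looks like this:
import json
import tempfile

keys = ["obj-1", "obj-2"]  # invented keys
delete_structure = {"Objects": [{"Key": key} for key in keys]}

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as out_file:
    out_file.write(json.dumps(delete_structure))

cmd = f"aws s3api delete-objects --bucket my-bucket --delete file://{out_file.name}"
print(cmd)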
Double dumps to escape resulting string - # Example: - # policy = {"a": 1} - # json.dumps(policy) => {"a": 1} - # json.dumps(json.dumps(policy)) => "{\"a\": 1}" - # TODO: update this - dumped_policy = json.dumps(json.dumps(policy)) - cmd = ( - f"aws {self.common_flags} s3api put-bucket-policy --bucket {bucket} " - f"--policy {dumped_policy} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Get bucket cors") - def get_bucket_cors(self, bucket: str) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("CORSRules") - - @reporter.step("Put bucket cors") - def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} " - f"--cors-configuration '{json.dumps(cors_configuration)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Delete bucket cors") - def delete_bucket_cors(self, bucket: str) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Delete bucket tagging") - def delete_bucket_tagging(self, bucket: str) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Put object retention") - def put_object_retention( - self, - bucket: str, - key: str, - retention: dict, - version_id: Optional[str] = None, - bypass_governance_retention: Optional[bool] = None, - ) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - version = f" --version-id {version_id}" if version_id else "" - cmd = ( - f"aws {self.common_flags} s3api put-object-retention --bucket {bucket} --key {key} " - f"{version} --retention '{json.dumps(retention, indent=4, sort_keys=True, default=str)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - if bypass_governance_retention is not None: - cmd += " --bypass-governance-retention" - self.local_shell.exec(cmd) - - @reporter.step("Put object legal hold") - def put_object_legal_hold( - self, - bucket: str, - key: str, - legal_hold_status: Literal["ON", "OFF"], - version_id: Optional[str] = None, - ) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - version = f" --version-id {version_id}" if version_id else "" - legal_hold = json.dumps({"Status": legal_hold_status}) - cmd = ( - f"aws {self.common_flags} s3api put-object-legal-hold --bucket {bucket} --key {key} " - f"{version} --legal-hold '{legal_hold}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Put object tagging") - def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None: - if bucket.startswith("-") or " " in bucket: - bucket = 
f'"{bucket}"' - - tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] - tagging = {"TagSet": tags} - version = f" --version-id {version_id}" if version_id else "" - cmd = ( - f"aws {self.common_flags} s3api put-object-tagging --bucket {bucket} --key {key} " - f"{version} --tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Get object tagging") - def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - version = f" --version-id {version_id}" if version_id else "" - cmd = ( - f"aws {self.common_flags} s3api get-object-tagging --bucket {bucket} --key {key} " - f"{version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("TagSet") - - @reporter.step("Delete object tagging") - def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - version = f" --version-id {version_id}" if version_id else "" - cmd = ( - f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} " - f"--key {key} {version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Sync directory S3") - def sync( - self, - bucket: str, - dir_path: str, - acl: Optional[str] = None, - metadata: Optional[dict] = None, - ) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - if metadata: - cmd += " --metadata" - for key, value in metadata.items(): - cmd += f" {key}={value}" - if acl: - cmd += f" --acl {acl}" - output = self.local_shell.exec(cmd, command_options).stdout - return self._to_json(output) - - @reporter.step("CP directory S3") - def cp( - self, - bucket: str, - dir_path: str, - acl: Optional[str] = None, - metadata: Optional[dict] = None, - ) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3 cp {dir_path} s3://{bucket} " - f"--endpoint-url {self.s3gate_endpoint} --recursive --profile {self.profile}" - ) - if metadata: - cmd += " --metadata" - for key, value in metadata.items(): - cmd += f" {key}={value}" - if acl: - cmd += f" --acl {acl}" - output = self.local_shell.exec(cmd, command_options).stdout - return self._to_json(output) - - @reporter.step("Create multipart upload S3") - def create_multipart_upload(self, bucket: str, key: str) -> str: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} " - f"--key {key} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" - - return response["UploadId"] - - @reporter.step("List multipart uploads S3") - def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} " - 
f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("Uploads") - - @reporter.step("Abort multipart upload S3") - def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api abort-multipart-upload --bucket {bucket} " - f"--key {key} --upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Upload part S3") - def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api upload-part --bucket {bucket} --key {key} " - f"--upload-id {upload_id} --part-number {part_num} --body {filepath} " - f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd, command_options).stdout - response = self._to_json(output) - assert response.get("ETag"), f"Expected ETag in response:\n{response}" - return response["ETag"] - - @reporter.step("Upload copy part S3") - def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api upload-part-copy --bucket {bucket} --key {key} " - f"--upload-id {upload_id} --part-number {part_num} --copy-source {copy_source} " - f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd, command_options).stdout - response = self._to_json(output) - assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}" - - return response["CopyPartResult"]["ETag"] - - @reporter.step("List parts S3") - def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} " - f"--upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("Parts"), f"Expected Parts in response:\n{response}" - - return response["Parts"] - - @reporter.step("Complete multipart upload S3") - def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - file_path = os.path.join(os.getcwd(), ASSETS_DIR, "parts.json") - parts_dict = {"Parts": [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]} - - with open(file_path, "w") as out_file: - out_file.write(json.dumps(parts_dict)) - - logger.info(f"Input file for complete-multipart-upload: {json.dumps(parts_dict)}") - - cmd = ( - f"aws {self.common_flags} s3api complete-multipart-upload --bucket {bucket} " - f"--key {key} --upload-id {upload_id} --multipart-upload file://{file_path} " - f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Put object lock configuration") - def put_object_lock_configuration(self, bucket: str, configuration: dict) -> 
dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} " - f"--object-lock-configuration '{json.dumps(configuration)}' --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - return self._to_json(output) - - @reporter.step("Get object lock configuration") - def get_object_lock_configuration(self, bucket: str): - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} " - f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("ObjectLockConfiguration") - - @reporter.step("Put bucket lifecycle configuration") - def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api put-bucket-lifecycle-configuration --bucket {bucket} " - f"--endpoint-url {self.s3gate_endpoint} --lifecycle-configuration file://{dumped_configuration} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response - - @reporter.step("Get bucket lifecycle configuration") - def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api get-bucket-lifecycle-configuration --bucket {bucket} " - f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response - - @reporter.step("Delete bucket lifecycle configuration") - def delete_bucket_lifecycle(self, bucket: str) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api delete-bucket-lifecycle --bucket {bucket} " - f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response - - @staticmethod - def _to_json(output: str) -> dict: - json_output = {} - if "{" not in output and "}" not in output: - logger.warning(f"Could not parse json from output {output}") - return json_output - - json_output = json.loads(output[output.index("{") :]) - - return json_output - - @reporter.step("Create presign url for the object") - def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str: - # AWS CLI does not support method definition and world only in 'get_object' state by default - cmd = f"aws {self.common_flags} s3 presign s3://{bucket}/{key} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - if expires_in: - cmd += f" --expires-in {expires_in}" - response = self.local_shell.exec(cmd).stdout - return response.strip() - - # IAM METHODS # - # Some methods don't have checks because AWS is silent in some cases (delete, attach, etc.) 
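# Illustrative sketch (not part of the removed file): _to_json above tolerates aws CLI output
# that has extra text (warnings, progress lines) before the JSON document by cutting everything
# up to the first "{". A standalone version of that trick; the warning text in the sample is
# invented:
import json

def to_json(output: str) -> dict:
    if "{" not in output and "}" not in output:
        return {}
    return json.loads(output[output.index("{"):])


sample = 'InsecureRequestWarning: unverified HTTPS request\n{"Buckets": []}'
print(to_json(sample))  # {'Buckets': []}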
- - @reporter.step("Adds the specified user to the specified group") - def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: - cmd = f"aws {self.common_flags} iam add-user-to-group --user-name {user_name} --group-name {group_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Attaches the specified managed policy to the specified IAM group") - def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: - cmd = f"aws {self.common_flags} iam attach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) - - return response - - @reporter.step("Attaches the specified managed policy to the specified user") - def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: - cmd = f"aws {self.common_flags} iam attach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) - - return response - - @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") - def iam_create_access_key(self, user_name: Optional[str] = None) -> dict: - cmd = f"aws {self.common_flags} iam create-access-key --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - if user_name: - cmd += f" --user-name {user_name}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - access_key_id = response["AccessKey"].get("AccessKeyId") - secret_access_key = response["AccessKey"].get("SecretAccessKey") - assert access_key_id, f"Expected AccessKeyId in response:\n{response}" - assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" - - return access_key_id, secret_access_key - - @reporter.step("Creates a new group") - def iam_create_group(self, group_name: str) -> dict: - cmd = f"aws {self.common_flags} iam create-group --group-name {group_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("Group"), f"Expected Group in response:\n{response}" - assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}" - - return response - - @reporter.step("Creates a new managed policy for your AWS account") - def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: - cmd = ( - f"aws {self.common_flags} iam create-policy --endpoint {self.iam_endpoint}" - f" --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'" - ) - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("Policy"), f"Expected Policy in response:\n{response}" - assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" - - return response - - @reporter.step("Creates a new IAM user for your AWS account") - def iam_create_user(self, user_name: str) -> dict: - cmd = f"aws 
{self.common_flags} iam create-user --user-name {user_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("User"), f"Expected User in response:\n{response}" - assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" - - return response - - @reporter.step("Deletes the access key pair associated with the specified IAM user") - def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: - cmd = f"aws {self.common_flags} iam delete-access-key --access-key-id {access_key_id} --user-name {user_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Deletes the specified IAM group") - def iam_delete_group(self, group_name: str) -> dict: - cmd = f"aws {self.common_flags} iam delete-group --group-name {group_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group") - def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: - cmd = f"aws {self.common_flags} iam delete-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Deletes the specified managed policy") - def iam_delete_policy(self, policy_arn: str) -> dict: - cmd = f"aws {self.common_flags} iam delete-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Deletes the specified IAM user") - def iam_delete_user(self, user_name: str) -> dict: - cmd = f"aws {self.common_flags} iam delete-user --user-name {user_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user") - def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: - cmd = f"aws {self.common_flags} iam delete-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Removes the specified managed policy from the specified IAM group") - def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: - cmd = f"aws {self.common_flags} iam detach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) - - return response - - @reporter.step("Removes the specified managed policy from the specified 
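# Illustrative sketch (not part of the removed file): the attach/detach/put policy methods above
# all pause for S3_SYNC_WAIT_TIME * 14 after the CLI call, giving the gateway time to propagate
# IAM changes. The same wait-after-change pattern, with an invented wait value (the real one is
# imported from frostfs_testlib.resources.common) and subprocess standing in for LocalShell:
import subprocess
import time

S3_SYNC_WAIT_TIME = 5  # seconds; assumed value for the example

def attach_user_policy(user_name: str, policy_arn: str, iam_endpoint: str) -> None:
    cmd = f"aws iam attach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {iam_endpoint}"
    subprocess.run(cmd, shell=True, check=False)  # stand-in for LocalShell().exec(cmd)
    time.sleep(S3_SYNC_WAIT_TIME * 14)  # wait for the policy change to take effect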
user") - def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: - cmd = f"aws {self.common_flags} iam detach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) - - return response - - @reporter.step("Returns a list of IAM users that are in the specified IAM group") - def iam_get_group(self, group_name: str) -> dict: - cmd = f"aws {self.common_flags} iam get-group --group-name {group_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert "Users" in response.keys(), f"Expected Users in response:\n{response}" - assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}" - - return response - - @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group") - def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: - cmd = f"aws {self.common_flags} iam get-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Retrieves information about the specified managed policy") - def iam_get_policy(self, policy_arn: str) -> dict: - cmd = f"aws {self.common_flags} iam get-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("Policy"), f"Expected Policy in response:\n{response}" - assert response["Policy"].get("Arn") == policy_arn, f"PolicyArn should be equal to {policy_arn}" - - return response - - @reporter.step("Retrieves information about the specified version of the specified managed policy") - def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: - cmd = f"aws {self.common_flags} iam get-policy-version --policy-arn {policy_arn} --version-id {version_id} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}" - assert response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}" - - return response - - @reporter.step("Retrieves information about the specified IAM user") - def iam_get_user(self, user_name: str) -> dict: - cmd = f"aws {self.common_flags} iam get-user --user-name {user_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("User"), f"Expected User in response:\n{response}" - assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" - - return response - - @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") - def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: - cmd = f"aws {self.common_flags} iam get-user-policy 
--user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("UserName"), f"Expected User in response:\n{response}" - - return response - - @reporter.step("Returns information about the access key IDs associated with the specified IAM user") - def iam_list_access_keys(self, user_name: str) -> dict: - cmd = f"aws {self.common_flags} iam list-access-keys --user-name {user_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Lists all managed policies that are attached to the specified IAM group") - def iam_list_attached_group_policies(self, group_name: str) -> dict: - cmd = f"aws {self.common_flags} iam list-attached-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" - - return response - - @reporter.step("Lists all managed policies that are attached to the specified IAM user") - def iam_list_attached_user_policies(self, user_name: str) -> dict: - cmd = f"aws {self.common_flags} iam list-attached-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" - - return response - - @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") - def iam_list_entities_for_policy(self, policy_arn: str) -> dict: - cmd = f"aws {self.common_flags} iam list-entities-for-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}" - assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}" - - return response - - @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group") - def iam_list_group_policies(self, group_name: str) -> dict: - cmd = f"aws {self.common_flags} iam list-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" - - return response - - @reporter.step("Lists the IAM groups") - def iam_list_groups(self) -> dict: - cmd = f"aws {self.common_flags} iam list-groups --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" - - return response - - @reporter.step("Lists the IAM groups that the specified IAM user belongs to") - def iam_list_groups_for_user(self, user_name: str) 
-> dict: - cmd = f"aws {self.common_flags} iam list-groups-for-user --user-name {user_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" - - return response - - @reporter.step("Lists all the managed policies that are available in your AWS account") - def iam_list_policies(self) -> dict: - cmd = f"aws {self.common_flags} iam list-policies --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert "Policies" in response.keys(), f"Expected Policies in response:\n{response}" - - return response - - @reporter.step("Lists information about the versions of the specified managed policy") - def iam_list_policy_versions(self, policy_arn: str) -> dict: - cmd = f"aws {self.common_flags} iam list-policy-versions --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("Versions"), f"Expected Versions in response:\n{response}" - - return response - - @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") - def iam_list_user_policies(self, user_name: str) -> dict: - cmd = f"aws {self.common_flags} iam list-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" - - return response - - @reporter.step("Lists the IAM users") - def iam_list_users(self) -> dict: - cmd = f"aws {self.common_flags} iam list-users --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert "Users" in response.keys(), f"Expected Users in response:\n{response}" - - return response - - @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") - def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: - cmd = ( - f"aws {self.common_flags} iam put-group-policy --endpoint {self.iam_endpoint}" - f" --group-name {group_name} --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'" - ) - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) - - return response - - @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") - def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: - cmd = ( - f"aws {self.common_flags} iam put-user-policy --endpoint {self.iam_endpoint}" - f" --user-name {user_name} --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'" - ) - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) - - return response - - @reporter.step("Removes the specified user from the specified group") - def 
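# Illustrative sketch (not part of the removed file): iam_put_user_policy above embeds the policy
# document into the command line as single-quoted JSON. A minimal, invented policy document and
# endpoint rendered the same way:
import json

policy_document = {
    "Version": "2012-10-17",
    "Statement": [{"Effect": "Allow", "Action": "s3:*", "Resource": "*"}],
}

cmd = (
    "aws iam put-user-policy --endpoint http://iam.frostfs.devenv"  # endpoint is illustrative
    f" --user-name test-user --policy-name allow-all --policy-document '{json.dumps(policy_document)}'"
)
print(cmd)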
iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam remove-user-from-group --endpoint {self.iam_endpoint}" - f" --group-name {group_name} --user-name {user_name}" - ) - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Updates the name and/or the path of the specified IAM group") - def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: - cmd = f"aws {self.common_flags} iam update-group --group-name {group_name} --endpoint {self.iam_endpoint}" - if new_name: - cmd += f" --new-group-name {new_name}" - if new_path: - cmd += f" --new-path {new_path}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Updates the name and/or the path of the specified IAM user") - def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: - cmd = f"aws {self.common_flags} iam update-user --user-name {user_name} --endpoint {self.iam_endpoint}" - if new_name: - cmd += f" --new-user-name {new_name}" - if new_path: - cmd += f" --new-path {new_path}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Adds one or more tags to an IAM user") - def iam_tag_user(self, user_name: str, tags: list) -> dict: - tags_json = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] - cmd = ( - f"aws {self.common_flags} iam tag-user --user-name {user_name} --tags '{json.dumps(tags_json)}' --endpoint {self.iam_endpoint}" - ) - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("List tags of IAM user") - def iam_list_user_tags(self, user_name: str) -> dict: - cmd = f"aws {self.common_flags} iam list-user-tags --user-name {user_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Removes the specified tags from the user") - def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: - tag_keys_joined = " ".join(tag_keys) - cmd = f"aws {self.common_flags} iam untag-user --user-name {user_name} --tag-keys {tag_keys_joined} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - # MFA METHODS - @reporter.step("Creates a new virtual MFA device") - def iam_create_virtual_mfa_device(self, virtual_mfa_device_name: str, outfile: str, bootstrap_method: str) -> tuple: - cmd = f"aws {self.common_flags} iam create-virtual-mfa-device --virtual-mfa-device-name {virtual_mfa_device_name}\ - --outfile {outfile} --bootstrap-method {bootstrap_method} --endpoint {self.iam_endpoint}" - - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - serial_number = response.get("VirtualMFADevice", {}).get("SerialNumber") - assert serial_number, f"Expected SerialNumber in 
response:\n{response}" - - return serial_number, False - - @reporter.step("Deactivates the specified MFA device and removes it from association with the user name") - def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: - cmd = f"aws {self.common_flags} iam deactivate-mfa-device --user-name {user_name} --serial-number {serial_number} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Deletes a virtual MFA device") - def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: - cmd = f"aws {self.common_flags} iam delete-virtual-mfa-device --serial-number {serial_number} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Enables the specified MFA device and associates it with the specified IAM user") - def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: - cmd = f"aws {self.common_flags} iam enable-mfa-device --user-name {user_name} --serial-number {serial_number} --authentication-code1 {authentication_code1}\ - --authentication-code2 {authentication_code2} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Lists the MFA devices for an IAM user") - def iam_list_virtual_mfa_devices(self) -> dict: - cmd = f"aws {self.common_flags} iam list-virtual-mfa-devices --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - assert response.get("VirtualMFADevices"), f"Expected VirtualMFADevices in response:\n{response}" - - return response - - @reporter.step("Get session token for user") - def sts_get_session_token( - self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None - ) -> tuple: - cmd = f"aws {self.common_flags} sts get-session-token --endpoint {self.iam_endpoint}" - if duration_seconds: - cmd += f" --duration-seconds {duration_seconds}" - if serial_number: - cmd += f" --serial-number {serial_number}" - if token_code: - cmd += f" --token-code {token_code}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - access_key = response.get("Credentials", {}).get("AccessKeyId") - secret_access_key = response.get("Credentials", {}).get("SecretAccessKey") - session_token = response.get("Credentials", {}).get("SessionToken") - assert access_key, f"Expected AccessKeyId in response:\n{response}" - assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" - assert session_token, f"Expected SessionToken in response:\n{response}" - - return access_key, secret_access_key, session_token diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py deleted file mode 100644 index ac4d55b..0000000 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ /dev/null @@ -1,1362 +0,0 @@ -import json -import logging -import os -from collections.abc import Callable -from datetime import datetime -from time import 
sleep -from typing import Literal, Optional, Union - -import boto3 -import urllib3 -from botocore.config import Config -from botocore.exceptions import ClientError -from mypy_boto3_iam import IAMClient -from mypy_boto3_s3 import S3Client -from mypy_boto3_sts import STSClient - -from frostfs_testlib import reporter -from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict -from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME -from frostfs_testlib.utils import string_utils - -# TODO: Refactor this code to use shell instead of _cmd_run -from frostfs_testlib.utils.cli_utils import log_command_execution -from frostfs_testlib.utils.file_utils import TestFile - -logger = logging.getLogger("NeoLogger") - -# Disable warnings on self-signed certificate which the -# boto library produces on requests to S3-gate in dev-env -urllib3.disable_warnings() - - -class Boto3ClientWrapper(S3ClientWrapper): - __repr_name__: str = "Boto3 client" - - @reporter.step("Configure S3 client (boto3)") - def __init__( - self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" - ) -> None: - self.s3gate_endpoint: str = "" - self.boto3_client: S3Client = None - - self.iam_endpoint: str = "" - self.boto3_iam_client: IAMClient = None - self.boto3_sts_client: STSClient = None - - self.access_key_id = access_key_id - self.secret_access_key = secret_access_key - self.profile = profile - self.region = region - - self.session = boto3.Session() - self.config = Config( - signature_version="s3v4", - retries={ - "max_attempts": MAX_REQUEST_ATTEMPTS, - "mode": RETRY_MODE, - }, - ) - - self.set_endpoint(s3gate_endpoint) - - @reporter.step("Set endpoint S3 to {s3gate_endpoint}") - def set_endpoint(self, s3gate_endpoint: str): - if self.s3gate_endpoint == s3gate_endpoint: - return - - self.s3gate_endpoint = s3gate_endpoint - - self.boto3_client: S3Client = self.session.client( - service_name="s3", - aws_access_key_id=self.access_key_id, - aws_secret_access_key=self.secret_access_key, - region_name=self.region, - config=self.config, - endpoint_url=s3gate_endpoint, - verify=False, - ) - - @reporter.step("Set endpoint IAM to {iam_endpoint}") - def set_iam_endpoint(self, iam_endpoint: str): - if self.iam_endpoint == iam_endpoint: - return - - self.iam_endpoint = iam_endpoint - - self.boto3_iam_client = self.session.client( - service_name="iam", - aws_access_key_id=self.access_key_id, - aws_secret_access_key=self.secret_access_key, - region_name=self.region, - endpoint_url=self.iam_endpoint, - verify=False, - ) - # since the STS does not have an endpoint, IAM is used - self.boto3_sts_client = self.session.client( - service_name="sts", - aws_access_key_id=self.access_key_id, - aws_secret_access_key=self.secret_access_key, - endpoint_url=iam_endpoint, - region_name=self.region, - verify=False, - ) - - def _to_s3_param(self, param: str) -> str: - replacement_map = { - "Acl": "ACL", - "Cors": "CORS", - "_": "", - } - result = param.title() - for find, replace in replacement_map.items(): - result = result.replace(find, replace) - return result - - def _convert_to_s3_params(self, scope: dict, exclude: Optional[list[str]] = None) -> dict: - exclude = ["self", "cls"] if not exclude else exclude + ["self", "cls"] - return {self._to_s3_param(param): value for param, value in scope.items() if param not in exclude and value is not None} - - def _exec_request(self, method: Callable, params: 
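The `_to_s3_param` / `_convert_to_s3_params` helpers below map the wrapper's snake_case keyword arguments onto boto3's PascalCase parameter names. A standalone restatement of that conversion, with a few checks showing the effect of `str.title()` plus the replacement map:

```python
# Restatement of the parameter-name conversion used by the boto3 wrapper:
# "grant_full_control" -> "Grant_Full_Control" (title case) -> "GrantFullControl",
# with special-casing for the ACL and CORS acronyms.
def to_s3_param(param: str) -> str:
    result = param.title()
    for find, replace in {"Acl": "ACL", "Cors": "CORS", "_": ""}.items():
        result = result.replace(find, replace)
    return result


assert to_s3_param("acl") == "ACL"
assert to_s3_param("grant_full_control") == "GrantFullControl"
assert to_s3_param("object_lock_enabled_for_bucket") == "ObjectLockEnabledForBucket"
```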
Optional[dict] = None, **kwargs): - if not params: - params = {} - - try: - result = method(**params) - except ClientError as err: - log_command_execution(method.__name__, err.response, params, **kwargs) - raise - - log_command_execution(method.__name__, result, params, **kwargs) - return result - - # BUCKET METHODS # - @reporter.step("Create bucket S3") - def create_bucket( - self, - bucket: Optional[str] = None, - object_lock_enabled_for_bucket: Optional[bool] = None, - acl: Optional[str] = None, - grant_write: Optional[str] = None, - grant_read: Optional[str] = None, - grant_full_control: Optional[str] = None, - location_constraint: Optional[str] = None, - ) -> str: - if bucket is None: - bucket = string_utils.unique_name("bucket-") - - params = {"Bucket": bucket} - if object_lock_enabled_for_bucket is not None: - params.update({"ObjectLockEnabledForBucket": object_lock_enabled_for_bucket}) - - if acl is not None: - params.update({"ACL": acl}) - elif grant_write or grant_read or grant_full_control: - if grant_write: - params.update({"GrantWrite": grant_write}) - elif grant_read: - params.update({"GrantRead": grant_read}) - elif grant_full_control: - params.update({"GrantFullControl": grant_full_control}) - - if location_constraint: - params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}) - - self._exec_request(self.boto3_client.create_bucket, params, endpoint=self.s3gate_endpoint, profile=self.profile) - return bucket - - @reporter.step("List buckets S3") - def list_buckets(self) -> list[str]: - response = self._exec_request( - self.boto3_client.list_buckets, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return [bucket["Name"] for bucket in response["Buckets"]] - - @reporter.step("Delete bucket S3") - def delete_bucket(self, bucket: str) -> None: - self._exec_request( - self.boto3_client.delete_bucket, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Head bucket S3") - def head_bucket(self, bucket: str) -> None: - self._exec_request( - self.boto3_client.head_bucket, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Put bucket versioning status") - def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: - params = {"Bucket": bucket, "VersioningConfiguration": {"Status": status.value}} - self._exec_request( - self.boto3_client.put_bucket_versioning, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Get bucket versioning status") - def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: - response = self._exec_request( - self.boto3_client.get_bucket_versioning, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response.get("Status") - - @reporter.step("Put bucket tagging") - def put_bucket_tagging(self, bucket: str, tags: list) -> None: - tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] - tagging = {"TagSet": tags} - params = self._convert_to_s3_params(locals(), exclude=["tags"]) - self._exec_request( - self.boto3_client.put_bucket_tagging, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Get bucket tagging") - def get_bucket_tagging(self, bucket: str) -> list: - response = self._exec_request( - self.boto3_client.get_bucket_tagging, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - 
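`_exec_request` above is the single choke point for every boto3 call: it invokes the method with a params dict, logs the outcome (including the `ClientError` response on failure), and re-raises so the caller still sees the original exception. A minimal standalone sketch of the same pattern:

```python
# Minimal sketch of an exec-and-log wrapper around a boto3 client method;
# logging backend and extra context keys are assumptions.
import logging
from collections.abc import Callable

from botocore.exceptions import ClientError

logger = logging.getLogger(__name__)


def exec_request(method: Callable, params: dict | None = None, **context):
    params = params or {}
    try:
        result = method(**params)
    except ClientError as err:
        logger.error("%s failed: %s (context: %s)", method.__name__, err.response, context)
        raise
    logger.info("%s succeeded (context: %s)", method.__name__, context)
    return result
```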
profile=self.profile, - ) - return response.get("TagSet") - - @reporter.step("Get bucket acl") - def get_bucket_acl(self, bucket: str) -> dict: - return self._exec_request( - self.boto3_client.get_bucket_acl, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Delete bucket tagging") - def delete_bucket_tagging(self, bucket: str) -> None: - self._exec_request( - self.boto3_client.delete_bucket_tagging, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Put bucket ACL") - def put_bucket_acl( - self, - bucket: str, - acl: Optional[str] = None, - grant_write: Optional[str] = None, - grant_read: Optional[str] = None, - ) -> None: - params = self._convert_to_s3_params(locals()) - self._exec_request( - self.boto3_client.put_bucket_acl, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Put object lock configuration") - def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: - params = {"Bucket": bucket, "ObjectLockConfiguration": configuration} - return self._exec_request( - self.boto3_client.put_object_lock_configuration, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Get object lock configuration") - def get_object_lock_configuration(self, bucket: str) -> dict: - response = self._exec_request( - self.boto3_client.get_object_lock_configuration, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response.get("ObjectLockConfiguration") - - @reporter.step("Get bucket policy") - def get_bucket_policy(self, bucket: str) -> str: - response = self._exec_request( - self.boto3_client.get_bucket_policy, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response.get("Policy") - - @reporter.step("Delete bucket policy") - def delete_bucket_policy(self, bucket: str) -> str: - return self._exec_request( - self.boto3_client.delete_bucket_policy, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Put bucket policy") - def put_bucket_policy(self, bucket: str, policy: dict) -> None: - params = {"Bucket": bucket, "Policy": json.dumps(policy)} - return self._exec_request( - self.boto3_client.put_bucket_policy, - params, - # Overriding option for AWS CLI - policy=policy, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Get bucket cors") - def get_bucket_cors(self, bucket: str) -> dict: - response = self._exec_request( - self.boto3_client.get_bucket_cors, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response.get("CORSRules") - - @reporter.step("Get bucket location") - def get_bucket_location(self, bucket: str) -> str: - response = self._exec_request( - self.boto3_client.get_bucket_location, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response.get("LocationConstraint") - - @reporter.step("Put bucket cors") - def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: - params = self._convert_to_s3_params(locals()) - return self._exec_request( - self.boto3_client.put_bucket_cors, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Delete bucket cors") - def delete_bucket_cors(self, bucket: str) -> None: - self._exec_request( - 
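`put_bucket_policy` below accepts the policy as a Python dict and serializes it with `json.dumps` before handing it to boto3, which expects a JSON string. A sketch of a policy document in that shape (bucket name and principal are illustrative assumptions):

```python
# Example bucket policy dict of the kind put_bucket_policy serializes;
# the statement contents are placeholders, not values from this repository.
import json

policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": "*",
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::example-bucket/*",
        }
    ],
}

policy_param = json.dumps(policy)  # boto3's Policy parameter is a JSON string
```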
self.boto3_client.delete_bucket_cors, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Put bucket lifecycle configuration") - def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: - params = self._convert_to_s3_params(locals(), exclude=["dumped_configuration"]) - return self._exec_request( - self.boto3_client.put_bucket_lifecycle_configuration, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Get bucket lifecycle configuration") - def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: - response = self._exec_request( - self.boto3_client.get_bucket_lifecycle_configuration, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return {"Rules": response.get("Rules")} - - @reporter.step("Delete bucket lifecycle configuration") - def delete_bucket_lifecycle(self, bucket: str) -> dict: - return self._exec_request( - self.boto3_client.delete_bucket_lifecycle, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - # END OF BUCKET METHODS # - # OBJECT METHODS # - - @reporter.step("List objects S3 v2") - def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - response = self._exec_request( - self.boto3_client.list_objects_v2, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - obj_list = [obj["Key"] for obj in response.get("Contents", [])] - logger.info(f"Found s3 objects: {obj_list}") - return response if full_output else obj_list - - @reporter.step("List objects S3") - def list_objects( - self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None - ) -> Union[dict, list[str]]: - params = {"Bucket": bucket} - if page_size: - params["MaxKeys"] = page_size - if prefix: - params["Prefix"] = prefix - response = self._exec_request( - self.boto3_client.list_objects, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - obj_list = [obj["Key"] for obj in response.get("Contents", [])] - logger.info(f"Found s3 objects: {obj_list}") - return response if full_output else obj_list - - @reporter.step("List objects versions S3") - def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: - response = self._exec_request( - self.boto3_client.list_object_versions, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response if full_output else response.get("Versions", []) - - @reporter.step("List objects delete markers S3") - def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: - response = self._exec_request( - self.boto3_client.list_object_versions, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response if full_output else response.get("DeleteMarkers", []) - - @reporter.step("Put object S3") - def put_object( - self, - bucket: str, - filepath: str, - key: Optional[str] = None, - metadata: Optional[dict] = None, - tagging: Optional[str] = None, - acl: Optional[str] = None, - object_lock_mode: Optional[str] = None, - object_lock_retain_until_date: Optional[datetime] = None, - object_lock_legal_hold_status: Optional[str] = None, - grant_full_control: Optional[str] = None, - grant_read: Optional[str] = None, - ) -> str: - if key is None: - key = 
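`put_bucket_lifecycle_configuration` below forwards a `LifecycleConfiguration` dict unchanged to boto3. A sketch of one plausible document in that shape (rule values are illustrative assumptions):

```python
# Illustrative LifecycleConfiguration document; the rule is an assumption,
# not a configuration taken from this repository's tests.
lifecycle_configuration = {
    "Rules": [
        {
            "ID": "expire-temp-objects",
            "Status": "Enabled",
            "Filter": {"Prefix": "tmp/"},
            "Expiration": {"Days": 1},
        }
    ]
}
```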
os.path.basename(filepath) - - with open(filepath, "rb") as put_file: - body = put_file.read() - - params = self._convert_to_s3_params(locals(), exclude=["filepath", "put_file"]) - response = self._exec_request( - self.boto3_client.put_object, - params, - body=filepath, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response.get("VersionId") - - @reporter.step("Head object S3") - def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - params = self._convert_to_s3_params(locals()) - return self._exec_request( - self.boto3_client.head_object, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Delete object S3") - def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - params = self._convert_to_s3_params(locals()) - return self._exec_request( - self.boto3_client.delete_object, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Delete objects S3") - def delete_objects(self, bucket: str, keys: list[str]) -> dict: - params = {"Bucket": bucket, "Delete": _make_objs_dict(keys)} - response = self._exec_request( - self.boto3_client.delete_objects, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - assert ( - "Errors" not in response - ), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}' - - return response - - @reporter.step("Delete object versions S3") - def delete_object_versions(self, bucket: str, object_versions: list) -> dict: - # Build deletion list in S3 format - delete_list = { - "Objects": [ - { - "Key": object_version["Key"], - "VersionId": object_version["VersionId"], - } - for object_version in object_versions - ] - } - params = {"Bucket": bucket, "Delete": delete_list} - return self._exec_request( - self.boto3_client.delete_objects, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Delete object versions S3 without delete markers") - def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: - # Delete objects without creating delete markers - for object_version in object_versions: - params = {"Bucket": bucket, "Key": object_version["Key"], "VersionId": object_version["VersionId"]} - self._exec_request( - self.boto3_client.delete_object, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Put object ACL") - def put_object_acl( - self, - bucket: str, - key: str, - acl: Optional[str] = None, - grant_write: Optional[str] = None, - grant_read: Optional[str] = None, - ) -> list: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_client.put_object_acl, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response.get("Grants") - - @reporter.step("Get object ACL") - def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_client.get_object_acl, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response.get("Grants") - - @reporter.step("Copy object S3") - def copy_object( - self, - source_bucket: str, - source_key: str, - bucket: Optional[str] = None, - key: Optional[str] = None, - acl: Optional[str] = None, - metadata_directive: Optional[Literal["COPY", "REPLACE"]] = 
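`delete_objects` and `delete_object_versions` below both build the batch-delete payload that S3 expects: a `Delete` dict whose `Objects` list carries one entry per key, optionally with a `VersionId`. A short restatement of that shape (`_make_objs_dict` produces the first form):

```python
# The two payload shapes used for batch deletes; key names and the version id
# are placeholders.
keys = ["report.txt", "logs/app.log"]
delete_payload = {"Objects": [{"Key": key} for key in keys]}

versioned_payload = {
    "Objects": [{"Key": "report.txt", "VersionId": "example-version-id"}]
}
```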
None, - metadata: Optional[dict] = None, - tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None, - tagging: Optional[str] = None, - ) -> str: - if bucket is None: - bucket = source_bucket - - if key is None: - key = string_utils.unique_name("copy-object-") - - copy_source = f"{source_bucket}/{source_key}" - params = self._convert_to_s3_params(locals(), exclude=["source_bucket", "source_key"]) - - self._exec_request( - self.boto3_client.copy_object, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return key - - @reporter.step("Get object S3") - def get_object( - self, - bucket: str, - key: str, - version_id: Optional[str] = None, - object_range: Optional[tuple[int, int]] = None, - full_output: bool = False, - ) -> dict | TestFile: - range_str = None - if object_range: - range_str = f"bytes={object_range[0]}-{object_range[1]}" - - params = locals() - params.update({"Range": f"bytes={object_range[0]}-{object_range[1]}"} if object_range else {}) - params = self._convert_to_s3_params(params, exclude=["object_range", "full_output", "range_str"]) - response = self._exec_request( - self.boto3_client.get_object, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - if full_output: - return response - - test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, string_utils.unique_name("dl-object-"))) - with open(test_file, "wb") as file: - chunk = response["Body"].read(1024) - while chunk: - file.write(chunk) - chunk = response["Body"].read(1024) - return test_file - - @reporter.step("Create multipart upload S3") - def create_multipart_upload(self, bucket: str, key: str) -> str: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_client.create_multipart_upload, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" - return response["UploadId"] - - @reporter.step("List multipart uploads S3") - def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: - response = self._exec_request( - self.boto3_client.list_multipart_uploads, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response.get("Uploads") - - @reporter.step("Abort multipart upload S3") - def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: - params = self._convert_to_s3_params(locals()) - self._exec_request( - self.boto3_client.abort_multipart_upload, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Upload part S3") - def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: - with open(filepath, "rb") as put_file: - body = put_file.read() - - params = self._convert_to_s3_params(locals(), exclude=["put_file", "part_num", "filepath"]) - params["PartNumber"] = part_num - - response = self._exec_request( - self.boto3_client.upload_part, - params, - body=filepath, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - assert response.get("ETag"), f"Expected ETag in response:\n{response}" - return response["ETag"] - - @reporter.step("Upload copy part S3") - def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: - params = self._convert_to_s3_params(locals(), exclude=["put_file", "part_num", "filepath"]) - params["PartNumber"] = part_num - response = self._exec_request( - self.boto3_client.upload_part_copy, - params, - 
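`get_object` below supports ranged reads by translating an `(start, end)` tuple into an HTTP `Range` header and then streams the body to a temp file in 1 KiB chunks. A standalone sketch of that download path; the endpoint, bucket, and key are placeholder assumptions:

```python
# Sketch of a ranged, chunked download equivalent to get_object(full_output=False);
# client construction and names are assumptions.
import boto3

s3 = boto3.client("s3", endpoint_url="http://s3.example.local:8080", verify=False)

response = s3.get_object(Bucket="example-bucket", Key="big-object", Range="bytes=0-1023")
with open("dl-object", "wb") as file:
    while chunk := response["Body"].read(1024):
        file.write(chunk)
```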
endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}" - return response["CopyPartResult"]["ETag"] - - @reporter.step("List parts S3") - def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_client.list_parts, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - assert response.get("Parts"), f"Expected Parts in response:\n{response}" - return response["Parts"] - - @reporter.step("Complete multipart upload S3") - def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: - parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts] - params = self._convert_to_s3_params(locals(), exclude=["parts"]) - params["MultipartUpload"] = {"Parts": parts} - return self._exec_request( - self.boto3_client.complete_multipart_upload, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Put object retention") - def put_object_retention( - self, - bucket: str, - key: str, - retention: dict, - version_id: Optional[str] = None, - bypass_governance_retention: Optional[bool] = None, - ) -> None: - params = self._convert_to_s3_params(locals()) - self._exec_request( - self.boto3_client.put_object_retention, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Put object legal hold") - def put_object_legal_hold( - self, - bucket: str, - key: str, - legal_hold_status: Literal["ON", "OFF"], - version_id: Optional[str] = None, - ) -> None: - legal_hold = {"Status": legal_hold_status} - params = self._convert_to_s3_params(locals(), exclude=["legal_hold_status"]) - self._exec_request( - self.boto3_client.put_object_legal_hold, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Put object tagging") - def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None: - tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] - tagging = {"TagSet": tags} - params = self._convert_to_s3_params(locals(), exclude=["tags"]) - self._exec_request( - self.boto3_client.put_object_tagging, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Get object tagging") - def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_client.get_object_tagging, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response.get("TagSet") - - @reporter.step("Delete object tagging") - def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: - params = self._convert_to_s3_params(locals()) - self._exec_request( - self.boto3_client.delete_object_tagging, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Get object attributes") - def get_object_attributes( - self, - bucket: str, - key: str, - attributes: list[str], - version_id: Optional[str] = None, - max_parts: Optional[int] = None, - part_number: Optional[int] = None, - full_output: bool = True, - ) -> dict: - logger.warning("Method get_object_attributes is not supported by boto3 client") - return {} - - @reporter.step("Sync directory S3") - def sync( - self, - bucket: 
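The multipart wrappers above and below cover the whole flow: create the upload, upload parts, then complete it with a parts list converted from `(part_number, etag)` tuples into `{"PartNumber": ..., "ETag": ...}` dicts. A hedged end-to-end sketch against plain boto3 (bucket, key, and data are assumptions):

```python
# Multipart upload flow sketch; endpoint, bucket, key and payload are placeholders.
import boto3

s3 = boto3.client("s3", endpoint_url="http://s3.example.local:8080", verify=False)
bucket, key = "example-bucket", "big-object"

upload_id = s3.create_multipart_upload(Bucket=bucket, Key=key)["UploadId"]

etag = s3.upload_part(
    Bucket=bucket, Key=key, UploadId=upload_id, PartNumber=1, Body=b"x" * (5 * 1024 * 1024)
)["ETag"]

parts = [(1, etag)]  # (part_number, etag) tuples, as the wrapper's complete_multipart_upload takes them
s3.complete_multipart_upload(
    Bucket=bucket,
    Key=key,
    UploadId=upload_id,
    MultipartUpload={"Parts": [{"ETag": e, "PartNumber": n} for n, e in parts]},
)
```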
str, - dir_path: str, - acl: Optional[str] = None, - metadata: Optional[dict] = None, - ) -> dict: - raise NotImplementedError("Sync is not supported for boto3 client") - - @reporter.step("CP directory S3") - def cp( - self, - bucket: str, - dir_path: str, - acl: Optional[str] = None, - metadata: Optional[dict] = None, - ) -> dict: - raise NotImplementedError("Cp is not supported for boto3 client") - - @reporter.step("Create presign url for the object") - def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str: - response = self._exec_request( - method=self.boto3_client.generate_presigned_url, - params={"ClientMethod": method, "Params": {"Bucket": bucket, "Key": key}, "ExpiresIn": expires_in}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response - - # END OBJECT METHODS # - - # IAM METHODS # - # Some methods don't have checks because boto3 is silent in some cases (delete, attach, etc.) - - @reporter.step("Adds the specified user to the specified group") - def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: - params = self._convert_to_s3_params(locals()) - return self._exec_request( - self.boto3_iam_client.add_user_to_group, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Attaches the specified managed policy to the specified IAM group") - def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_iam_client.attach_group_policy, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - sleep(S3_SYNC_WAIT_TIME * 14) - return response - - @reporter.step("Attaches the specified managed policy to the specified user") - def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_iam_client.attach_user_policy, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - sleep(S3_SYNC_WAIT_TIME * 14) - return response - - @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") - def iam_create_access_key(self, user_name: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.create_access_key, - params={"UserName": user_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - access_key_id = response["AccessKey"].get("AccessKeyId") - secret_access_key = response["AccessKey"].get("SecretAccessKey") - assert access_key_id, f"Expected AccessKeyId in response:\n{response}" - assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" - - return access_key_id, secret_access_key - - @reporter.step("Creates a new group") - def iam_create_group(self, group_name: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.create_group, - params={"GroupName": group_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - assert response.get("Group"), f"Expected Group in response:\n{response}" - assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}" - - return response - - @reporter.step("Creates a new managed policy for your AWS account") - def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: - params = self._convert_to_s3_params(locals()) - params["PolicyDocument"] = json.dumps(policy_document) - response = self._exec_request( - 
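`create_presign_url` above is a thin pass-through to boto3's `generate_presigned_url`; the resulting URL can be exercised with any plain HTTP client. A sketch of that round trip (endpoint and names are assumptions; verification is disabled to match the self-signed dev-env certificates, and `requests` is assumed to be available):

```python
# Sketch: generate a presigned GET URL and fetch it without SDK credentials.
import boto3
import requests

s3 = boto3.client("s3", endpoint_url="https://s3.example.local:8080", verify=False)

url = s3.generate_presigned_url(
    ClientMethod="get_object",
    Params={"Bucket": "example-bucket", "Key": "report.txt"},
    ExpiresIn=3600,
)
response = requests.get(url, verify=False)
response.raise_for_status()
```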
self.boto3_iam_client.create_policy, - params, - # Overriding option for AWS CLI - policy_document=policy_document, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - assert response.get("Policy"), f"Expected Policy in response:\n{response}" - assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" - - return response - - @reporter.step("Creates a new IAM user for your AWS account") - def iam_create_user(self, user_name: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.create_user, - params={"UserName": user_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - assert response.get("User"), f"Expected User in response:\n{response}" - assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" - - return response - - @reporter.step("Deletes the access key pair associated with the specified IAM user") - def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: - params = self._convert_to_s3_params(locals()) - return self._exec_request( - self.boto3_iam_client.delete_access_key, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Deletes the specified IAM group") - def iam_delete_group(self, group_name: str) -> dict: - return self._exec_request( - self.boto3_iam_client.delete_group, - params={"GroupName": group_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group") - def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: - params = self._convert_to_s3_params(locals()) - return self._exec_request( - self.boto3_iam_client.delete_group_policy, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Deletes the specified managed policy") - def iam_delete_policy(self, policy_arn: str) -> dict: - return self._exec_request( - self.boto3_iam_client.delete_policy, - params={"PolicyArn": policy_arn}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Deletes the specified IAM user") - def iam_delete_user(self, user_name: str) -> dict: - return self._exec_request( - self.boto3_iam_client.delete_user, - params={"UserName": user_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user") - def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: - params = self._convert_to_s3_params(locals()) - return self._exec_request( - self.boto3_iam_client.delete_user_policy, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Removes the specified managed policy from the specified IAM group") - def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_iam_client.detach_group_policy, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - sleep(S3_SYNC_WAIT_TIME * 14) - return response - - @reporter.step("Removes the specified managed policy from the specified user") - def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_iam_client.detach_user_policy, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - sleep(S3_SYNC_WAIT_TIME 
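`iam_create_policy` above (and the `put_*_policy` wrappers later) take the policy document as a dict and `json.dumps` it into the `PolicyDocument` parameter. A sketch of one plausible document in that shape (actions and resources are illustrative assumptions):

```python
# Example IAM policy document dict; statement contents are placeholders.
policy_document = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": ["s3:ListBucket", "s3:GetObject"],
            "Resource": ["arn:aws:s3:::example-bucket", "arn:aws:s3:::example-bucket/*"],
        }
    ],
}
```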
* 14) - return response - - @reporter.step("Returns a list of IAM users that are in the specified IAM group") - def iam_get_group(self, group_name: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.get_group, - params={"GroupName": group_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}" - return response - - @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group") - def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: - params = self._convert_to_s3_params(locals()) - return self._exec_request( - self.boto3_iam_client.get_group_policy, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Retrieves information about the specified managed policy") - def iam_get_policy(self, policy_arn: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.get_policy, - params={"PolicyArn": policy_arn}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - assert response.get("Policy"), f"Expected Policy in response:\n{response}" - assert response["Policy"].get("Arn") == policy_arn, f"PolicyArn should be equal to {policy_arn}" - - return response - - @reporter.step("Retrieves information about the specified version of the specified managed policy") - def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_iam_client.get_policy_version, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}" - assert response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}" - - return response - - @reporter.step("Retrieves information about the specified IAM user") - def iam_get_user(self, user_name: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.get_user, - params={"UserName": user_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - assert response.get("User"), f"Expected User in response:\n{response}" - assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" - - return response - - @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") - def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_iam_client.get_user_policy, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - assert response.get("UserName"), f"Expected UserName in response:\n{response}" - return response - - @reporter.step("Returns information about the access key IDs associated with the specified IAM user") - def iam_list_access_keys(self, user_name: str) -> dict: - return self._exec_request( - self.boto3_iam_client.list_access_keys, - params={"UserName": user_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Lists all managed policies that are attached to the specified IAM group") - def iam_list_attached_group_policies(self, group_name: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.list_attached_group_policies, - params={"GroupName": group_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - assert 
"AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" - return response - - @reporter.step("Lists all managed policies that are attached to the specified IAM user") - def iam_list_attached_user_policies(self, user_name: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.list_attached_user_policies, - params={"UserName": user_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" - return response - - @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") - def iam_list_entities_for_policy(self, policy_arn: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.list_entities_for_policy, - params={"PolicyArn": policy_arn}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}" - assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}" - - return response - - @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group") - def iam_list_group_policies(self, group_name: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.list_group_policies, - params={"GroupName": group_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" - return response - - @reporter.step("Lists the IAM groups") - def iam_list_groups(self) -> dict: - response = self._exec_request( - self.boto3_iam_client.list_groups, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" - return response - - @reporter.step("Lists the IAM groups that the specified IAM user belongs to") - def iam_list_groups_for_user(self, user_name: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.list_groups_for_user, - params={"UserName": user_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" - return response - - @reporter.step("Lists all the managed policies that are available in your AWS account") - def iam_list_policies(self) -> dict: - response = self._exec_request( - self.boto3_iam_client.list_policies, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - assert response.get("Policies"), f"Expected Policies in response:\n{response}" - return response - - @reporter.step("Lists information about the versions of the specified managed policy") - def iam_list_policy_versions(self, policy_arn: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.list_policy_versions, - params={"PolicyArn": policy_arn}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - assert response.get("Versions"), f"Expected Versions in response:\n{response}" - return response - - @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") - def iam_list_user_policies(self, user_name: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.list_user_policies, - params={"UserName": user_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" - return response - - @reporter.step("Lists the IAM 
users") - def iam_list_users(self) -> dict: - response = self._exec_request( - self.boto3_iam_client.list_users, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - assert response.get("Users"), f"Expected Users in response:\n{response}" - return response - - @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") - def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: - params = self._convert_to_s3_params(locals()) - params["PolicyDocument"] = json.dumps(policy_document) - response = self._exec_request( - self.boto3_iam_client.put_group_policy, - params, - # Overriding option for AWS CLI - policy_document=policy_document, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - sleep(S3_SYNC_WAIT_TIME * 14) - return response - - @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") - def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: - params = self._convert_to_s3_params(locals()) - params["PolicyDocument"] = json.dumps(policy_document) - response = self._exec_request( - self.boto3_iam_client.put_user_policy, - params, - # Overriding option for AWS CLI - policy_document=policy_document, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - sleep(S3_SYNC_WAIT_TIME * 14) - return response - - @reporter.step("Removes the specified user from the specified group") - def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: - params = self._convert_to_s3_params(locals()) - return self._exec_request( - self.boto3_iam_client.remove_user_from_group, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Updates the name and/or the path of the specified IAM group") - def iam_update_group(self, group_name: str, new_name: str, new_path: Optional[str] = None) -> dict: - params = {"GroupName": group_name, "NewGroupName": new_name, "NewPath": "/"} - return self._exec_request( - self.boto3_iam_client.update_group, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Updates the name and/or the path of the specified IAM user") - def iam_update_user(self, user_name: str, new_name: str, new_path: Optional[str] = None) -> dict: - params = {"UserName": user_name, "NewUserName": new_name, "NewPath": "/"} - return self._exec_request( - self.boto3_iam_client.update_user, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Adds one or more tags to an IAM user") - def iam_tag_user(self, user_name: str, tags: list) -> dict: - params = self._convert_to_s3_params(locals()) - params["Tags"] = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] - return self._exec_request( - self.boto3_iam_client.tag_user, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("List tags of IAM user") - def iam_list_user_tags(self, user_name: str) -> dict: - return self._exec_request( - self.boto3_iam_client.list_user_tags, - params={"UserName": user_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Removes the specified tags from the user") - def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: - params = self._convert_to_s3_params(locals()) - return self._exec_request( - self.boto3_iam_client.untag_user, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - # MFA methods - @reporter.step("Creates a 
new virtual MFA device") - def iam_create_virtual_mfa_device( - self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None - ) -> tuple: - response = self.boto3_iam_client.create_virtual_mfa_device(VirtualMFADeviceName=virtual_mfa_device_name) - - serial_number = response.get("VirtualMFADevice", {}).get("SerialNumber") - base32StringSeed = response.get("VirtualMFADevice", {}).get("Base32StringSeed") - assert serial_number, f"Expected SerialNumber in response:\n{response}" - assert base32StringSeed, f"Expected Base32StringSeed in response:\n{response}" - - return serial_number, base32StringSeed - - @reporter.step("Deactivates the specified MFA device and removes it from association with the user name") - def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: - response = self.boto3_iam_client.deactivate_mfa_device(UserName=user_name, SerialNumber=serial_number) - - return response - - @reporter.step("Deletes a virtual MFA device") - def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: - response = self.boto3_iam_client.delete_virtual_mfa_device(SerialNumber=serial_number) - - return response - - @reporter.step("Enables the specified MFA device and associates it with the specified IAM user") - def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: - response = self.boto3_iam_client.enable_mfa_device( - UserName=user_name, - SerialNumber=serial_number, - AuthenticationCode1=authentication_code1, - AuthenticationCode2=authentication_code2, - ) - - return response - - @reporter.step("Lists the MFA devices for an IAM user") - def iam_list_virtual_mfa_devices(self) -> dict: - response = self.boto3_iam_client.list_virtual_mfa_devices() - assert response.get("VirtualMFADevices"), f"Expected VirtualMFADevices in response:\n{response}" - - return response - - @reporter.step("Get session token for user") - def sts_get_session_token( - self, duration_seconds: Optional[str] = "", serial_number: Optional[str] = "", token_code: Optional[str] = "" - ) -> tuple: - response = self.boto3_sts_client.get_session_token( - DurationSeconds=duration_seconds, - SerialNumber=serial_number, - TokenCode=token_code, - ) - - access_key = response.get("Credentials", {}).get("AccessKeyId") - secret_access_key = response.get("Credentials", {}).get("SecretAccessKey") - session_token = response.get("Credentials", {}).get("SessionToken") - assert access_key, f"Expected AccessKeyId in response:\n{response}" - assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" - assert session_token, f"Expected SessionToken in response:\n{response}" - - return access_key, secret_access_key, session_token diff --git a/src/frostfs_testlib/clients/s3/curl_bucket_resolver.py b/src/frostfs_testlib/clients/s3/curl_bucket_resolver.py deleted file mode 100644 index 4d845cf..0000000 --- a/src/frostfs_testlib/clients/s3/curl_bucket_resolver.py +++ /dev/null @@ -1,16 +0,0 @@ -import re - -from frostfs_testlib.cli.generic_cli import GenericCli -from frostfs_testlib.clients.s3 import BucketContainerResolver -from frostfs_testlib.storage.cluster import ClusterNode - - -class CurlBucketContainerResolver(BucketContainerResolver): - def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str: - curl = GenericCli("curl", node.host) - output = curl(f"-I http://127.0.0.1:8084/{bucket_name}") - pattern = r"X-Container-Id: (\S+)" - cid = re.findall(pattern, 
output.stdout) - if cid: - return cid[0] - return None diff --git a/src/frostfs_testlib/clients/s3/interfaces.py b/src/frostfs_testlib/clients/s3/interfaces.py deleted file mode 100644 index 0d03a28..0000000 --- a/src/frostfs_testlib/clients/s3/interfaces.py +++ /dev/null @@ -1,623 +0,0 @@ -from abc import ABC, abstractmethod -from datetime import datetime -from typing import Literal, Optional, Union - -from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.testing.readable import HumanReadableABC, HumanReadableEnum -from frostfs_testlib.utils.file_utils import TestFile - - -def _make_objs_dict(key_names): - objs_list = [] - for key in key_names: - obj_dict = {"Key": key} - objs_list.append(obj_dict) - objs_dict = {"Objects": objs_list} - return objs_dict - - -class VersioningStatus(HumanReadableEnum): - UNDEFINED = None - ENABLED = "Enabled" - SUSPENDED = "Suspended" - - -class ACL: - PRIVATE = "private" - PUBLIC_READ = "public-read" - PUBLIC_READ_WRITE = "public-read-write" - AUTHENTICATED_READ = "authenticated-read" - AWS_EXEC_READ = "aws-exec-read" - BUCKET_OWNER_READ = "bucket-owner-read" - BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control" - LOG_DELIVERY_WRITE = "log-delivery-write" - - -class BucketContainerResolver(ABC): - @abstractmethod - def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str: - """ - Resolve Container ID from bucket name - - Args: - node: node from where we want to resolve - bucket_name: name of the bucket - **kwargs: any other required params - - Returns: Container ID - """ - raise NotImplementedError("Call from abstract class") - - -class S3ClientWrapper(HumanReadableABC): - access_key_id: str - secret_access_key: str - profile: str - region: str - - s3gate_endpoint: str - iam_endpoint: str - - @abstractmethod - def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str, region: str) -> None: - pass - - @abstractmethod - def set_endpoint(self, s3gate_endpoint: str): - """Set endpoint""" - - @abstractmethod - def set_iam_endpoint(self, iam_endpoint: str): - """Set iam endpoint""" - - @abstractmethod - def create_bucket( - self, - bucket: Optional[str] = None, - object_lock_enabled_for_bucket: Optional[bool] = None, - acl: Optional[str] = None, - grant_write: Optional[str] = None, - grant_read: Optional[str] = None, - grant_full_control: Optional[str] = None, - location_constraint: Optional[str] = None, - ) -> str: - """Create a bucket.""" - - # BUCKET METHODS # - - @abstractmethod - def list_buckets(self) -> list[str]: - """List buckets.""" - - @abstractmethod - def delete_bucket(self, bucket: str) -> None: - """Delete bucket""" - - @abstractmethod - def head_bucket(self, bucket: str) -> None: - """This action is useful to determine if a bucket exists and you have permission to access it. - The action returns a 200 OK if the bucket exists and you have permission to access it. - - If the bucket does not exist or you do not have permission to access it, the HEAD request - returns a generic 400 Bad Request, 403 Forbidden or 404 Not Found code. - A message body is not included, so you cannot determine the exception beyond these error codes. - """ - - @abstractmethod - def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: - """Sets the versioning state of an existing bucket. - - You can set the versioning state with one of the following values: - - Enabled—Enables versioning for the objects in the bucket. 
All objects added to the bucket receive a unique version ID. - - Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null. - - If the versioning state has never been set on a bucket, it has no versioning state - """ - - @abstractmethod - def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: - """Returns the versioning state of a bucket. - - To retrieve the versioning state of a bucket, you must be the bucket owner. - """ - - @abstractmethod - def put_bucket_tagging(self, bucket: str, tags: list) -> None: - """Sets the tags for a bucket.""" - - @abstractmethod - def get_bucket_tagging(self, bucket: str) -> list: - """Returns the tag set associated with the Outposts bucket.""" - - @abstractmethod - def delete_bucket_tagging(self, bucket: str) -> None: - """Deletes the tags from the bucket.""" - - @abstractmethod - def get_bucket_acl(self, bucket: str) -> dict: - """This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket.""" - - @abstractmethod - def put_bucket_acl( - self, - bucket: str, - acl: Optional[str] = None, - grant_write: Optional[str] = None, - grant_read: Optional[str] = None, - ) -> list: - """Sets the permissions on an existing bucket using access control lists (ACL).""" - - @abstractmethod - def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: - """Places an Object Lock configuration on the specified bucket. - The rule specified in the Object Lock configuration will be applied by - default to every new object placed in the specified bucket.""" - - @abstractmethod - def get_object_lock_configuration(self, bucket: str) -> dict: - """Gets the Object Lock configuration for a bucket. - The rule specified in the Object Lock configuration will be applied by - default to every new object placed in the specified bucket.""" - - @abstractmethod - def get_bucket_policy(self, bucket: str) -> str: - """Returns the policy of a specified bucket.""" - - @abstractmethod - def delete_bucket_policy(self, bucket: str) -> str: - """Deletes the policy of a specified bucket.""" - - @abstractmethod - def put_bucket_policy(self, bucket: str, policy: dict) -> None: - """Applies S3 bucket policy to an S3 bucket.""" - - @abstractmethod - def get_bucket_cors(self, bucket: str) -> dict: - """Returns the Cross-Origin Resource Sharing (CORS) configuration information set for the bucket.""" - - @abstractmethod - def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: - """Sets the cors configuration for your bucket. If the configuration exists, S3 replaces it.""" - - @abstractmethod - def delete_bucket_cors(self, bucket: str) -> None: - """Deletes the cors configuration information set for the bucket.""" - - @abstractmethod - def get_bucket_location(self, bucket: str) -> str: - """Returns the LocationConstraint the bucket resides in. You can set the it - using the LocationConstraint request parameter in a CreateBucket request.""" - - # END OF BUCKET METHODS # - - # OBJECT METHODS # - - @abstractmethod - def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - """Returns some or all (up to 1,000) of the objects in a bucket with each request. - You can use the request parameters as selection criteria to return a subset of the objects in a bucket. - A 200 OK response can contain valid or invalid XML. 
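Because every concrete client implements the `S3ClientWrapper` interface above, test code can stay client-agnostic and receive either the AWS CLI wrapper or the boto3 wrapper. A hedged sketch of such a call site, written against the interface as it appears in this module (the helper name is an assumption):

```python
# Sketch of client-agnostic usage of the interface defined above.
from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus


def enable_versioning(client: S3ClientWrapper, bucket: str) -> None:
    client.put_bucket_versioning(bucket, VersioningStatus.ENABLED)
    assert client.get_bucket_versioning_status(bucket) == "Enabled"
```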
Make sure to design your application - to parse the contents of the response and handle it appropriately. - """ - - @abstractmethod - def list_objects( - self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None - ) -> Union[dict, list[str]]: - """Returns some or all (up to 1,000) of the objects in a bucket with each request. - You can use the request parameters as selection criteria to return a subset of the objects in a bucket. - A 200 OK response can contain valid or invalid XML. Make sure to design your application - to parse the contents of the response and handle it appropriately. - """ - - @abstractmethod - def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: - """Returns metadata about all versions of the objects in a bucket.""" - - @abstractmethod - def list_delete_markers(self, bucket: str, full_output: bool = False) -> dict: - """Returns metadata about all delete markers of the objects in a bucket.""" - - @abstractmethod - def put_object( - self, - bucket: str, - filepath: str, - key: Optional[str] = None, - metadata: Optional[dict] = None, - tagging: Optional[str] = None, - acl: Optional[str] = None, - object_lock_mode: Optional[str] = None, - object_lock_retain_until_date: Optional[datetime] = None, - object_lock_legal_hold_status: Optional[str] = None, - grant_full_control: Optional[str] = None, - grant_read: Optional[str] = None, - ) -> str: - """Adds an object to a bucket.""" - - @abstractmethod - def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - """The HEAD action retrieves metadata from an object without returning the object itself. - This action is useful if you're only interested in an object's metadata.""" - - @abstractmethod - def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - """Removes the null version (if there is one) of an object and inserts a delete marker, - which becomes the latest version of the object. If there isn't a null version, - S3 does not remove any objects but will still respond that the command was successful.""" - - @abstractmethod - def delete_objects(self, bucket: str, keys: list[str]) -> dict: - """This action enables you to delete multiple objects from a bucket - using a single HTTP request. If you know the object keys that - you want to delete, then this action provides a suitable alternative - to sending individual delete requests, reducing per-request overhead. 
- - The request contains a list of up to 1000 keys that you want to delete.""" - - @abstractmethod - def delete_object_versions(self, bucket: str, object_versions: list) -> dict: - """Delete object versions""" - - @abstractmethod - def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: - """Delete object versions without delete markers""" - - @abstractmethod - def put_object_acl( - self, - bucket: str, - key: str, - acl: Optional[str] = None, - grant_write: Optional[str] = None, - grant_read: Optional[str] = None, - ) -> list: - """Uses the acl subresource to set the access control - list (ACL) permissions for a new or existing object in an S3 bucket.""" - - @abstractmethod - def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - """Returns the access control list (ACL) of an object.""" - - @abstractmethod - def copy_object( - self, - source_bucket: str, - source_key: str, - bucket: Optional[str] = None, - key: Optional[str] = None, - acl: Optional[str] = None, - metadata_directive: Optional[Literal["COPY", "REPLACE"]] = None, - metadata: Optional[dict] = None, - tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None, - tagging: Optional[str] = None, - ) -> str: - """Creates a copy of an object""" - - @abstractmethod - def get_object( - self, - bucket: str, - key: str, - version_id: Optional[str] = None, - object_range: Optional[tuple[int, int]] = None, - full_output: bool = False, - ) -> dict | TestFile: - """Retrieves objects from S3.""" - - @abstractmethod - def create_multipart_upload(self, bucket: str, key: str) -> str: - """This action initiates a multipart upload and returns an upload ID. - This upload ID is used to associate all of the parts in the specific multipart upload. - You specify this upload ID in each of your subsequent upload part requests (see UploadPart). - You also include this upload ID in the final request to either complete or abort the multipart upload request.""" - - @abstractmethod - def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: - """This action lists in-progress multipart uploads. - An in-progress multipart upload is a multipart upload that has been initiated - using the Initiate Multipart Upload request, but has not yet been completed or aborted. - - This action returns at most 1,000 multipart uploads in the response.""" - - @abstractmethod - def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: - """This action aborts a multipart upload. After a multipart upload is aborted, - no additional parts can be uploaded using that upload ID. - The storage consumed by any previously uploaded parts will be freed. - However, if any part uploads are currently in progress, those part - uploads might or might not succeed. 
As a result, it might be necessary to - abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.""" - - @abstractmethod - def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: - """Uploads a part in a multipart upload.""" - - @abstractmethod - def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: - """Uploads a part by copying data from an existing object as data source.""" - - @abstractmethod - def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: - """Lists the parts that have been uploaded for a specific multipart upload.""" - - @abstractmethod - def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: - """Completes a multipart upload by assembling previously uploaded parts.""" - - @abstractmethod - def put_object_retention( - self, - bucket: str, - key: str, - retention: dict, - version_id: Optional[str] = None, - bypass_governance_retention: Optional[bool] = None, - ) -> None: - """Places an Object Retention configuration on an object.""" - - @abstractmethod - def put_object_legal_hold( - self, - bucket: str, - key: str, - legal_hold_status: Literal["ON", "OFF"], - version_id: Optional[str] = None, - ) -> None: - """Applies a legal hold configuration to the specified object.""" - - @abstractmethod - def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: - """Sets the tag-set for an object.""" - - @abstractmethod - def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - """Returns the tag-set of an object.""" - - @abstractmethod - def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: - """Removes the entire tag set from the specified object.""" - - @abstractmethod - def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: - """Adds or updates bucket lifecycle configuration""" - - @abstractmethod - def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: - """Gets bucket lifecycle configuration""" - - @abstractmethod - def delete_bucket_lifecycle(self, bucket: str) -> dict: - """Deletes bucket lifecycle""" - - @abstractmethod - def get_object_attributes( - self, - bucket: str, - key: str, - attributes: list[str], - version_id: str = "", - max_parts: int = 0, - part_number: int = 0, - full_output: bool = True, - ) -> dict: - """Retrieves all the metadata from an object without returning the object itself.""" - - @abstractmethod - def sync( - self, - bucket: str, - dir_path: str, - acl: Optional[str] = None, - metadata: Optional[dict] = None, - ) -> dict: - """sync directory TODO: Add proper description""" - - @abstractmethod - def cp( - self, - bucket: str, - dir_path: str, - acl: Optional[str] = None, - metadata: Optional[dict] = None, - ) -> dict: - """cp directory TODO: Add proper description""" - - @abstractmethod - def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str: - """Creates presign URL""" - - # END OF OBJECT METHODS # - - # IAM METHODS # - - @abstractmethod - def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: - """Adds the specified user to the specified group""" - - @abstractmethod - def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: - """Attaches the specified managed policy to the 
specified IAM group""" - - @abstractmethod - def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: - """Attaches the specified managed policy to the specified user""" - - @abstractmethod - def iam_create_access_key(self, user_name: str) -> dict: - """Creates a new AWS secret access key and access key ID for the specified user""" - - @abstractmethod - def iam_create_group(self, group_name: str) -> dict: - """Creates a new group""" - - @abstractmethod - def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: - """Creates a new managed policy for your AWS account""" - - @abstractmethod - def iam_create_user(self, user_name: str) -> dict: - """Creates a new IAM user for your AWS account""" - - @abstractmethod - def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: - """Deletes the access key pair associated with the specified IAM user""" - - @abstractmethod - def iam_delete_group(self, group_name: str) -> dict: - """Deletes the specified IAM group""" - - @abstractmethod - def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: - """Deletes the specified inline policy that is embedded in the specified IAM group""" - - @abstractmethod - def iam_delete_policy(self, policy_arn: str) -> dict: - """Deletes the specified managed policy""" - - @abstractmethod - def iam_delete_user(self, user_name: str) -> dict: - """Deletes the specified IAM user""" - - @abstractmethod - def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: - """Deletes the specified inline policy that is embedded in the specified IAM user""" - - @abstractmethod - def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: - """Removes the specified managed policy from the specified IAM group""" - - @abstractmethod - def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: - """Removes the specified managed policy from the specified user""" - - @abstractmethod - def iam_get_group(self, group_name: str) -> dict: - """Returns a list of IAM users that are in the specified IAM group""" - - @abstractmethod - def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: - """Retrieves the specified inline policy document that is embedded in the specified IAM group""" - - @abstractmethod - def iam_get_policy(self, policy_arn: str) -> dict: - """Retrieves information about the specified managed policy""" - - @abstractmethod - def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: - """Retrieves information about the specified version of the specified managed policy""" - - @abstractmethod - def iam_get_user(self, user_name: str) -> dict: - """Retrieves information about the specified IAM user""" - - @abstractmethod - def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: - """Retrieves the specified inline policy document that is embedded in the specified IAM user""" - - @abstractmethod - def iam_list_access_keys(self, user_name: str) -> dict: - """Returns information about the access key IDs associated with the specified IAM user""" - - @abstractmethod - def iam_list_attached_group_policies(self, group_name: str) -> dict: - """Lists all managed policies that are attached to the specified IAM group""" - - @abstractmethod - def iam_list_attached_user_policies(self, user_name: str) -> dict: - """Lists all managed policies that are attached to the specified IAM user""" - - @abstractmethod - def iam_list_entities_for_policy(self, policy_arn: str) -> 
dict: - """Lists all IAM users, groups, and roles that the specified managed policy is attached to""" - - @abstractmethod - def iam_list_group_policies(self, group_name: str) -> dict: - """Lists the names of the inline policies that are embedded in the specified IAM group""" - - @abstractmethod - def iam_list_groups(self) -> dict: - """Lists the IAM groups""" - - @abstractmethod - def iam_list_groups_for_user(self, user_name: str) -> dict: - """Lists the IAM groups that the specified IAM user belongs to""" - - @abstractmethod - def iam_list_policies(self) -> dict: - """Lists all the managed policies that are available in your AWS account""" - - @abstractmethod - def iam_list_policy_versions(self, policy_arn: str) -> dict: - """Lists information about the versions of the specified managed policy""" - - @abstractmethod - def iam_list_user_policies(self, user_name: str) -> dict: - """Lists the names of the inline policies embedded in the specified IAM user""" - - @abstractmethod - def iam_list_users(self) -> dict: - """Lists the IAM users""" - - @abstractmethod - def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: - """Adds or updates an inline policy document that is embedded in the specified IAM group""" - - @abstractmethod - def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: - """Adds or updates an inline policy document that is embedded in the specified IAM user""" - - @abstractmethod - def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: - """Removes the specified user from the specified group""" - - @abstractmethod - def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: - """Updates the name and/or the path of the specified IAM group""" - - @abstractmethod - def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: - """Updates the name and/or the path of the specified IAM user""" - - @abstractmethod - def iam_tag_user(self, user_name: str, tags: list) -> dict: - """Adds one or more tags to an IAM user""" - - @abstractmethod - def iam_list_user_tags(self, user_name: str) -> dict: - """List tags of IAM user""" - - @abstractmethod - def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: - """Removes the specified tags from the user""" - - # MFA methods - @abstractmethod - def iam_create_virtual_mfa_device( - self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None - ) -> tuple: - """Creates a new virtual MFA device""" - - @abstractmethod - def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: - """Deactivates the specified MFA device and removes it from association with the user name""" - - @abstractmethod - def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: - """Deletes a virtual MFA device""" - - @abstractmethod - def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: - """Enables the specified MFA device and associates it with the specified IAM user""" - - @abstractmethod - def iam_list_virtual_mfa_devices(self) -> dict: - """Lists the MFA devices for an IAM user""" - - @abstractmethod - def sts_get_session_token( - self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None - ) -> tuple: - """Get session token for 
user""" diff --git a/src/frostfs_testlib/clients/s3/s3_http_client.py b/src/frostfs_testlib/clients/s3/s3_http_client.py deleted file mode 100644 index f6f423d..0000000 --- a/src/frostfs_testlib/clients/s3/s3_http_client.py +++ /dev/null @@ -1,149 +0,0 @@ -import hashlib -import logging -import xml.etree.ElementTree as ET - -import httpx -from botocore.auth import SigV4Auth -from botocore.awsrequest import AWSRequest -from botocore.credentials import Credentials - -from frostfs_testlib import reporter -from frostfs_testlib.clients import HttpClient -from frostfs_testlib.utils.file_utils import TestFile - -logger = logging.getLogger("NeoLogger") - -DEFAULT_TIMEOUT = 60.0 - - -class S3HttpClient: - def __init__( - self, s3gate_endpoint: str, access_key_id: str, secret_access_key: str, profile: str = "default", region: str = "us-east-1" - ) -> None: - self.http_client = HttpClient() - self.credentials = Credentials(access_key_id, secret_access_key) - self.profile = profile - self.region = region - - self.iam_endpoint: str = None - self.s3gate_endpoint: str = None - self.service: str = None - self.signature: SigV4Auth = None - - self.set_endpoint(s3gate_endpoint) - - def _to_s3_header(self, header: str) -> dict: - replacement_map = { - "Acl": "ACL", - "_": "-", - } - - result = header - if not header.startswith("x_amz"): - result = header.title() - - for find, replace in replacement_map.items(): - result = result.replace(find, replace) - - return result - - def _convert_to_s3_headers(self, scope: dict, exclude: list[str] = None): - exclude = ["self", "cls"] if not exclude else exclude + ["self", "cls"] - return {self._to_s3_header(header): value for header, value in scope.items() if header not in exclude and value is not None} - - def _create_aws_request( - self, method: str, url: str, headers: dict, content: str | bytes | TestFile = None, params: dict = None - ) -> AWSRequest: - data = b"" - - if content is not None: - if isinstance(content, TestFile): - with open(content, "rb") as io_content: - data = io_content.read() - elif isinstance(content, str): - data = bytes(content, encoding="utf-8") - elif isinstance(content, bytes): - data = content - else: - raise TypeError(f"Content expected as a string, bytes or TestFile object, got: {content}") - - headers["X-Amz-Content-SHA256"] = hashlib.sha256(data).hexdigest() - aws_request = AWSRequest(method, url, headers, data, params) - self.signature.add_auth(aws_request) - - return aws_request - - def _exec_request( - self, - method: str, - url: str, - headers: dict, - content: str | bytes | TestFile = None, - params: dict = None, - timeout: float = DEFAULT_TIMEOUT, - ) -> dict: - aws_request = self._create_aws_request(method, url, headers, content, params) - response = self.http_client.send( - aws_request.method, - aws_request.url, - headers=dict(aws_request.headers), - data=aws_request.data, - params=aws_request.params, - timeout=timeout, - ) - - try: - response.raise_for_status() - except httpx.HTTPStatusError: - raise httpx.HTTPStatusError(response.text, request=response.request, response=response) - - root = ET.fromstring(response.read()) - data = { - "LastModified": root.find(".//LastModified").text, - "ETag": root.find(".//ETag").text, - } - - if response.headers.get("x-amz-version-id"): - data["VersionId"] = response.headers.get("x-amz-version-id") - - return data - - @reporter.step("Set endpoint S3 to {s3gate_endpoint}") - def set_endpoint(self, s3gate_endpoint: str): - if self.s3gate_endpoint == s3gate_endpoint: - return - - 
self.s3gate_endpoint = s3gate_endpoint - self.service = "s3" - self.signature = SigV4Auth(self.credentials, self.service, self.region) - - @reporter.step("Set endpoint IAM to {iam_endpoint}") - def set_iam_endpoint(self, iam_endpoint: str): - if self.iam_endpoint == iam_endpoint: - return - - self.iam_endpoint = iam_endpoint - self.service = "iam" - self.signature = SigV4Auth(self.credentials, self.service, self.region) - - @reporter.step("Patch object S3") - def patch_object( - self, - bucket: str, - key: str, - content: str | bytes | TestFile, - content_range: str, - version_id: str = None, - if_match: str = None, - if_unmodified_since: str = None, - x_amz_expected_bucket_owner: str = None, - timeout: float = DEFAULT_TIMEOUT, - ) -> dict: - if content_range and not content_range.startswith("bytes"): - content_range = f"bytes {content_range}/*" - - url = f"{self.s3gate_endpoint}/{bucket}/{key}" - headers = self._convert_to_s3_headers(locals(), exclude=["bucket", "key", "content", "version_id", "timeout"]) - params = {"VersionId": version_id} if version_id is not None else None - - return self._exec_request("PATCH", url, headers, content, params, timeout=timeout) diff --git a/src/frostfs_testlib/credentials/authmate_s3_provider.py b/src/frostfs_testlib/credentials/authmate_s3_provider.py deleted file mode 100644 index ed6454b..0000000 --- a/src/frostfs_testlib/credentials/authmate_s3_provider.py +++ /dev/null @@ -1,47 +0,0 @@ -import re -from typing import Optional - -from frostfs_testlib import reporter -from frostfs_testlib.cli import FrostfsAuthmate -from frostfs_testlib.credentials.interfaces import S3Credentials, S3CredentialsProvider, User -from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC -from frostfs_testlib.shell import LocalShell -from frostfs_testlib.steps.cli.container import list_containers -from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate -from frostfs_testlib.utils import string_utils - - -class AuthmateS3CredentialsProvider(S3CredentialsProvider): - @reporter.step("Init S3 Credentials using Authmate CLI") - def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None) -> S3Credentials: - cluster_nodes: list[ClusterNode] = self.cluster.cluster_nodes - shell = LocalShell() - wallet = user.wallet - endpoint = cluster_node.storage_node.get_rpc_endpoint() - - gate_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes] - # unique short bucket name - bucket = string_utils.unique_name("bucket-") - - frostfs_authmate: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC) - issue_secret_output = frostfs_authmate.secret.issue( - wallet=wallet.path, - peer=endpoint, - gate_public_key=gate_public_keys, - wallet_password=wallet.password, - container_policy=location_constraints, - container_friendly_name=bucket, - ).stdout - - aws_access_key_id = str(re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id")) - aws_secret_access_key = str( - re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output).group("aws_secret_access_key") - ) - cid = str(re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group("container_id")) - - containers_list = list_containers(wallet, shell, endpoint) - assert cid in containers_list, f"Expected cid {cid} in {containers_list}" - - user.s3_credentials = S3Credentials(aws_access_key_id, aws_secret_access_key) - return user.s3_credentials diff
--git a/src/frostfs_testlib/credentials/interfaces.py b/src/frostfs_testlib/credentials/interfaces.py deleted file mode 100644 index b2ae6f1..0000000 --- a/src/frostfs_testlib/credentials/interfaces.py +++ /dev/null @@ -1,51 +0,0 @@ -from abc import ABC, abstractmethod -from dataclasses import dataclass, field -from typing import Any, Optional - -from frostfs_testlib.plugins import load_plugin -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo - - -@dataclass -class S3Credentials: - access_key: str - secret_key: str - - -@dataclass -class User: - name: str - attributes: dict[str, Any] = field(default_factory=dict) - wallet: WalletInfo | None = None - s3_credentials: S3Credentials | None = None - - -class S3CredentialsProvider(ABC): - def __init__(self, cluster: Cluster) -> None: - self.cluster = cluster - - @abstractmethod - def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None, **kwargs) -> S3Credentials: - raise NotImplementedError("Directly called abstract class?") - - -class GrpcCredentialsProvider(ABC): - def __init__(self, cluster: Cluster) -> None: - self.cluster = cluster - - @abstractmethod - def provide(self, user: User, cluster_node: ClusterNode, **kwargs) -> WalletInfo: - raise NotImplementedError("Directly called abstract class?") - - -class CredentialsProvider(object): - S3: S3CredentialsProvider - GRPC: GrpcCredentialsProvider - - def __init__(self, cluster: Cluster) -> None: - config = cluster.cluster_nodes[0].host.config - s3_cls = load_plugin("frostfs.testlib.credentials_providers", config.s3_creds_plugin_name) - self.S3 = s3_cls(cluster) - grpc_cls = load_plugin("frostfs.testlib.credentials_providers", config.grpc_creds_plugin_name) - self.GRPC = grpc_cls(cluster) diff --git a/src/frostfs_testlib/credentials/wallet_factory_provider.py b/src/frostfs_testlib/credentials/wallet_factory_provider.py deleted file mode 100644 index d00020f..0000000 --- a/src/frostfs_testlib/credentials/wallet_factory_provider.py +++ /dev/null @@ -1,14 +0,0 @@ -from frostfs_testlib import reporter -from frostfs_testlib.credentials.interfaces import GrpcCredentialsProvider, User -from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_PASS -from frostfs_testlib.shell.local_shell import LocalShell -from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.dataclasses.wallet import WalletFactory, WalletInfo - - -class WalletFactoryProvider(GrpcCredentialsProvider): - @reporter.step("Init gRPC Credentials using wallet generation") - def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo: - wallet_factory = WalletFactory(ASSETS_DIR, LocalShell()) - user.wallet = wallet_factory.create_wallet(file_name=user.name, password=DEFAULT_WALLET_PASS) - return user.wallet diff --git a/src/frostfs_testlib/defaults.py b/src/frostfs_testlib/defaults.py deleted file mode 100644 index 22097be..0000000 --- a/src/frostfs_testlib/defaults.py +++ /dev/null @@ -1,10 +0,0 @@ -class Options: - DEFAULT_SHELL_TIMEOUT = 120 - - @staticmethod - def get_default_shell_timeout(): - return Options.DEFAULT_SHELL_TIMEOUT - - @staticmethod - def set_default_shell_timeout(value: int): - Options.DEFAULT_SHELL_TIMEOUT = value diff --git a/src/frostfs_testlib/fixtures.py b/src/frostfs_testlib/fixtures.py deleted file mode 100644 index 7d767d2..0000000 --- a/src/frostfs_testlib/fixtures.py +++ /dev/null @@ -1,52 +0,0 @@ -import logging -import os -from 
datetime import datetime -from importlib.metadata import entry_points - -import pytest -import yaml - -from frostfs_testlib import reporter -from frostfs_testlib.hosting.hosting import Hosting -from frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE -from frostfs_testlib.storage import get_service_registry - - -@pytest.fixture(scope="session", autouse=True) -def session_start_time(): - start_time = datetime.utcnow() - return start_time - - -@pytest.fixture(scope="session") -def configure_testlib(): - reporter.get_reporter().register_handler(reporter.AllureHandler()) - reporter.get_reporter().register_handler(reporter.StepsLogger()) - logging.getLogger("paramiko").setLevel(logging.INFO) - - # Register Services for cluster - registry = get_service_registry() - services = entry_points(group="frostfs.testlib.services") - for svc in services: - registry.register_service(svc.name, svc.load()) - - -@pytest.fixture(scope="session") -def temp_directory(configure_testlib): - with reporter.step("Prepare tmp directory"): - full_path = ASSETS_DIR - if not os.path.exists(full_path): - os.mkdir(full_path) - - return full_path - - -@pytest.fixture(scope="session") -def hosting(configure_testlib) -> Hosting: - with open(HOSTING_CONFIG_FILE, "r") as file: - hosting_config = yaml.full_load(file) - - hosting_instance = Hosting() - hosting_instance.configure(hosting_config) - - return hosting_instance diff --git a/src/frostfs_testlib/healthcheck/basic_healthcheck.py b/src/frostfs_testlib/healthcheck/basic_healthcheck.py deleted file mode 100644 index fc7ba59..0000000 --- a/src/frostfs_testlib/healthcheck/basic_healthcheck.py +++ /dev/null @@ -1,109 +0,0 @@ -from typing import Callable - -from frostfs_testlib import reporter -from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.healthcheck.interfaces import Healthcheck -from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC -from frostfs_testlib.shell import CommandOptions -from frostfs_testlib.steps.node_management import storage_node_healthcheck -from frostfs_testlib.storage.cluster import ClusterNode, ServiceClass -from frostfs_testlib.testing.test_control import wait_for_success -from frostfs_testlib.utils.failover_utils import check_services_status - - -class BasicHealthcheck(Healthcheck): - def _perform(self, cluster_node: ClusterNode, checks: dict[Callable, dict]): - issues: list[str] = [] - for check, kwargs in checks.items(): - issue = check(cluster_node, **kwargs) - if issue: - issues.append(issue) - - assert not issues, "Issues found:\n" + "\n".join(issues) - - @wait_for_success(900, 30, title="Wait for full healthcheck for {cluster_node}") - def full_healthcheck(self, cluster_node: ClusterNode): - checks = { - self.storage_healthcheck: {}, - self._tree_healthcheck: {}, - } - - self._perform(cluster_node, checks) - - @wait_for_success(900, 30, title="Wait for startup healthcheck on {cluster_node}") - def startup_healthcheck(self, cluster_node: ClusterNode): - checks = { - self.storage_healthcheck: {}, - self._tree_healthcheck: {}, - } - - self._perform(cluster_node, checks) - - @wait_for_success(900, 30, title="Wait for storage healthcheck on {cluster_node}") - def storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: - checks = { - self._storage_healthcheck: {}, - } - - self._perform(cluster_node, checks) - - @wait_for_success(900, 30, title="Wait for tree healthcheck on {cluster_node}") - def tree_healthcheck(self, cluster_node: ClusterNode) -> str | None: - checks = { - 
self._tree_healthcheck: {}, - } - - self._perform(cluster_node, checks) - - @wait_for_success(120, 5, title="Wait for service healthcheck on {cluster_node}") - def services_healthcheck(self, cluster_node: ClusterNode): - svcs_to_check = cluster_node.services - checks = { - check_services_status: { - "service_list": svcs_to_check, - "expected_status": "active", - }, - self._check_services: {"services": svcs_to_check}, - } - - self._perform(cluster_node, checks) - - def _check_services(self, cluster_node: ClusterNode, services: list[ServiceClass]): - for svc in services: - result = svc.service_healthcheck() - if result == False: - return f"Service {svc.get_service_systemctl_name()} healthcheck failed on node {cluster_node}." - - @reporter.step("Storage healthcheck on {cluster_node}") - def _storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: - result = storage_node_healthcheck(cluster_node.storage_node) - self._gather_socket_info(cluster_node) - if result.health_status != "READY" or result.network_status != "ONLINE": - return f"Node {cluster_node} is not healthy. Health={result.health_status}. Network={result.network_status}" - - @reporter.step("Tree healthcheck on {cluster_node}") - def _tree_healthcheck(self, cluster_node: ClusterNode) -> str | None: - host = cluster_node.host - service_config = host.get_service_config(cluster_node.storage_node.name) - wallet_path = service_config.attributes["wallet_path"] - wallet_password = service_config.attributes["wallet_password"] - - shell = host.get_shell() - wallet_config_path = f"/tmp/{cluster_node.storage_node.name}-config.yaml" - wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' - shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") - - remote_cli = FrostfsCli( - shell, - host.get_cli_config(FROSTFS_CLI_EXEC).exec_path, - config_file=wallet_config_path, - ) - result = remote_cli.tree.healthcheck(rpc_endpoint="127.0.0.1:8080") - if result.return_code != 0: - return ( - f"Error during tree healthcheck (rc={result.return_code}): {result.stdout}. 
\n Stderr: {result.stderr}" - ) - - @reporter.step("Gather socket info for {cluster_node}") - def _gather_socket_info(self, cluster_node: ClusterNode): - cluster_node.host.get_shell().exec("ss -tuln | grep 8080", CommandOptions(check=False)) diff --git a/src/frostfs_testlib/healthcheck/interfaces.py b/src/frostfs_testlib/healthcheck/interfaces.py deleted file mode 100644 index cf17852..0000000 --- a/src/frostfs_testlib/healthcheck/interfaces.py +++ /dev/null @@ -1,25 +0,0 @@ -from abc import ABC, abstractmethod - -from frostfs_testlib.storage.cluster import ClusterNode - - -class Healthcheck(ABC): - @abstractmethod - def full_healthcheck(self, cluster_node: ClusterNode): - """Perform full healthcheck on the target cluster node""" - - @abstractmethod - def startup_healthcheck(self, cluster_node: ClusterNode): - """Perform healthcheck required on startup of target cluster node""" - - @abstractmethod - def storage_healthcheck(self, cluster_node: ClusterNode): - """Perform storage service healthcheck on target cluster node""" - - @abstractmethod - def services_healthcheck(self, cluster_node: ClusterNode): - """Perform service status check on target cluster node""" - - @abstractmethod - def tree_healthcheck(self, cluster_node: ClusterNode): - """Perform tree healthcheck on target cluster node""" diff --git a/src/frostfs_testlib/hooks.py b/src/frostfs_testlib/hooks.py deleted file mode 100644 index d7e4cc8..0000000 --- a/src/frostfs_testlib/hooks.py +++ /dev/null @@ -1,31 +0,0 @@ -import pytest - - -@pytest.hookimpl(specname="pytest_collection_modifyitems") -def pytest_add_frostfs_marker(items: list[pytest.Item]): - # All tests which reside in frostfs nodeid are granted with frostfs marker, excluding - # nodeid = full path of the test - # 1. plugins - # 2. testlib itself - for item in items: - location = item.location[0] - if "frostfs" in location and "plugin" not in location and "testlib" not in location: - item.add_marker("frostfs") - - -# pytest hook. 
Do not rename -@pytest.hookimpl(trylast=True) -def pytest_collection_modifyitems(items: list[pytest.Item]): - # The order of running tests corresponded to the suites - items.sort(key=lambda item: item.location[0]) - - # Change order of tests based on @pytest.mark.order() marker - def order(item: pytest.Item) -> int: - order_marker = item.get_closest_marker("order") - if order_marker and (len(order_marker.args) != 1 or not isinstance(order_marker.args[0], int)): - raise RuntimeError("Incorrect usage of pytest.mark.order") - - order_value = order_marker.args[0] if order_marker else 0 - return order_value - - items.sort(key=lambda item: order(item)) diff --git a/src/frostfs_testlib/hosting/__init__.py b/src/frostfs_testlib/hosting/__init__.py deleted file mode 100644 index 22a55c7..0000000 --- a/src/frostfs_testlib/hosting/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from frostfs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig -from frostfs_testlib.hosting.hosting import Hosting -from frostfs_testlib.hosting.interfaces import Host diff --git a/src/frostfs_testlib/hosting/config.py b/src/frostfs_testlib/hosting/config.py deleted file mode 100644 index 6cdee39..0000000 --- a/src/frostfs_testlib/hosting/config.py +++ /dev/null @@ -1,77 +0,0 @@ -from dataclasses import dataclass, field, fields -from typing import Any - - -@dataclass -class ParsedAttributes: - """Base class for data structures representing parsed attributes from configs.""" - - @classmethod - def parse(cls, attributes: dict[str, Any]): - # Pick attributes supported by the class - field_names = set(field.name for field in fields(cls)) - supported_attributes = {key: value for key, value in attributes.items() if key in field_names} - return cls(**supported_attributes) - - -@dataclass -class CLIConfig: - """Describes CLI tool on some host. - - Attributes: - name: Name of the tool. - exec_path: Path to executable file of the tool. - attributes: Dict with extra information about the tool. - """ - - name: str - exec_path: str - attributes: dict[str, str] = field(default_factory=dict) - extra_args: list[str] = field(default_factory=list) - - -@dataclass -class ServiceConfig: - """Describes frostFS service on some host. - - Attributes: - name: Name of the service that uniquely identifies it across all hosts. - attributes: Dict with extra information about the service. For example, we can store - name of docker container (or name of systemd service), endpoints, path to wallet, - path to configuration file, etc. - """ - - name: str - attributes: dict[str, str] = field(default_factory=dict) - - -@dataclass -class HostConfig: - """Describes machine that hosts frostFS services. - - Attributes: - plugin_name: Name of plugin that should be used to manage the host. - healthcheck_plugin_name: Name of the plugin for healthcheck operations. - address: Address of the machine (IP or DNS name). - services: List of services hosted on the machine. - clis: List of CLI tools available on the machine. - attributes: Dict with extra information about the host. For example, we can store - connection parameters in this dict. 
- """ - - plugin_name: str - hostname: str - healthcheck_plugin_name: str - address: str - s3_creds_plugin_name: str = field(default="authmate") - grpc_creds_plugin_name: str = field(default="wallet_factory") - product: str = field(default="frostfs") - services: list[ServiceConfig] = field(default_factory=list) - clis: list[CLIConfig] = field(default_factory=list) - attributes: dict[str, str] = field(default_factory=dict) - interfaces: dict[str, str] = field(default_factory=dict) - environment: dict[str, str] = field(default_factory=dict) - - def __post_init__(self) -> None: - self.services = [ServiceConfig(**service) for service in self.services or []] - self.clis = [CLIConfig(**cli) for cli in self.clis or []] diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py deleted file mode 100644 index d458b0a..0000000 --- a/src/frostfs_testlib/hosting/docker_host.py +++ /dev/null @@ -1,342 +0,0 @@ -import json -import logging -import os -import re -import time -from dataclasses import dataclass -from datetime import datetime -from typing import Any, Optional - -import docker -from requests import HTTPError - -from frostfs_testlib.hosting.config import ParsedAttributes -from frostfs_testlib.hosting.interfaces import DiskInfo, Host, HostStatus -from frostfs_testlib.shell import LocalShell, Shell, SSHShell -from frostfs_testlib.shell.command_inspectors import SudoInspector - -logger = logging.getLogger("frostfs.testlib.hosting") - - -@dataclass -class HostAttributes(ParsedAttributes): - """Represents attributes of host where Docker with frostFS runs. - - Attributes: - sudo_shell: Specifies whether shell commands should be auto-prefixed with sudo. - docker_endpoint: Protocol, address and port of docker where frostFS runs. Recommended format - is tcp socket (https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-socket-option), - for example: tcp://{address}:2375 (where 2375 is default docker port). - ssh_login: Login for SSH connection to the machine where docker runs. - ssh_password: Password for SSH connection. - ssh_private_key_path: Path to private key for SSH connection. - ssh_private_key_passphrase: Passphrase for the private key. - """ - - sudo_shell: bool = False - docker_endpoint: Optional[str] = None - ssh_login: Optional[str] = None - ssh_password: Optional[str] = None - ssh_private_key_path: Optional[str] = None - ssh_private_key_passphrase: Optional[str] = None - - -@dataclass -class ServiceAttributes(ParsedAttributes): - """Represents attributes of service running as Docker container. - - Attributes: - container_name: Name of Docker container where the service runs. - volume_name: Name of volume where storage node service stores the data. - start_timeout: Timeout (in seconds) for service to start. - stop_timeout: Timeout (in seconds) for service to stop. 
- """ - - container_name: str - volume_name: Optional[str] = None - start_timeout: int = 90 - stop_timeout: int = 90 - - -class DockerHost(Host): - """Manages services hosted in Docker containers running on a local or remote machine.""" - - def get_shell(self, sudo: bool = False) -> Shell: - host_attributes = HostAttributes.parse(self._config.attributes) - command_inspectors = [] - if sudo: - command_inspectors.append(SudoInspector()) - - if not host_attributes.ssh_login: - # If there is no SSH connection to the host, use local shell - return LocalShell(command_inspectors) - - # If there is SSH connection to the host, use SSH shell - return SSHShell( - host=self._config.address, - login=host_attributes.ssh_login, - password=host_attributes.ssh_password, - private_key_path=host_attributes.ssh_private_key_path, - private_key_passphrase=host_attributes.ssh_private_key_passphrase, - command_inspectors=command_inspectors, - ) - - def start_host(self) -> None: - # We emulate starting machine by starting all services - # As an alternative we can probably try to stop docker service... - for service_config in self._config.services: - self.start_service(service_config.name) - - def get_host_status(self) -> HostStatus: - # We emulate host status by checking all services. - for service_config in self._config.services: - state = self._get_container_state(service_config.name) - if state != "running": - return HostStatus.OFFLINE - - return HostStatus.ONLINE - - def stop_host(self) -> None: - # We emulate stopping machine by stopping all services - # As an alternative we can probably try to stop docker service... - for service_config in self._config.services: - self.stop_service(service_config.name) - - def start_service(self, service_name: str) -> None: - service_attributes = self._get_service_attributes(service_name) - - client = self._get_docker_client() - client.start(service_attributes.container_name) - - self._wait_for_container_to_be_in_state( - container_name=service_attributes.container_name, - expected_state="running", - timeout=service_attributes.start_timeout, - ) - - def stop_service(self, service_name: str) -> None: - service_attributes = self._get_service_attributes(service_name) - - client = self._get_docker_client() - client.stop(service_attributes.container_name) - - self._wait_for_container_to_be_in_state( - container_name=service_attributes.container_name, - expected_state="exited", - timeout=service_attributes.stop_timeout, - ) - - def mask_service(self, service_name: str) -> None: - # Not required for Docker - return - - def unmask_service(self, service_name: str) -> None: - # Not required for Docker - return - - def wait_success_suspend_process(self, service_name: str): - raise NotImplementedError("Not supported for docker") - - def wait_success_resume_process(self, service_name: str): - raise NotImplementedError("Not supported for docker") - - def restart_service(self, service_name: str) -> None: - service_attributes = self._get_service_attributes(service_name) - - client = self._get_docker_client() - client.restart(service_attributes.container_name) - - self._wait_for_container_to_be_in_state( - container_name=service_attributes.container_name, - expected_state="running", - timeout=service_attributes.start_timeout, - ) - - def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None: - raise NotImplementedError("Not implemented for docker") - - def get_data_directory(self, service_name: str) -> str: - service_attributes = 
self._get_service_attributes(service_name) - - client = self._get_docker_client() - volume_info = client.inspect_volume(service_attributes.volume_name) - volume_path = volume_info["Mountpoint"] - - return volume_path - - def send_signal_to_service(self, service_name: str, signal: str) -> None: - raise NotImplementedError("Not implemented for docker") - - def delete_metabase(self, service_name: str) -> None: - raise NotImplementedError("Not implemented for docker") - - def delete_write_cache(self, service_name: str) -> None: - raise NotImplementedError("Not implemented for docker") - - def delete_fstree(self, service_name: str) -> None: - raise NotImplementedError("Not implemented for docker") - - def delete_blobovnicza(self, service_name: str) -> None: - raise NotImplementedError("Not implemented for docker") - - def delete_pilorama(self, service_name: str) -> None: - raise NotImplementedError("Not implemented for docker") - - def delete_file(self, file_path: str) -> None: - raise NotImplementedError("Not implemented for docker") - - def is_file_exist(self, file_path: str) -> None: - raise NotImplementedError("Not implemented for docker") - - def wipefs_storage_node_data(self, service_name: str) -> None: - raise NotImplementedError("Not implemented for docker") - - def finish_wipefs(self, service_name: str) -> None: - raise NotImplementedError("Not implemented for docker") - - def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: - volume_path = self.get_data_directory(service_name) - - shell = self.get_shell() - meta_clean_cmd = f"rm -rf {volume_path}/meta*/*" - data_clean_cmd = f"; rm -rf {volume_path}/data*/*" if not cache_only else "" - cmd = f"{meta_clean_cmd}{data_clean_cmd}" - shell.exec(cmd) - - def attach_disk(self, device: str, disk_info: DiskInfo) -> None: - raise NotImplementedError("Not supported for docker") - - def detach_disk(self, device: str) -> DiskInfo: - raise NotImplementedError("Not supported for docker") - - def is_disk_attached(self, device: str, disk_info: DiskInfo) -> bool: - raise NotImplementedError("Not supported for docker") - - def dump_logs( - self, - directory_path: str, - since: Optional[datetime] = None, - until: Optional[datetime] = None, - filter_regex: Optional[str] = None, - ) -> None: - client = self._get_docker_client() - for service_config in self._config.services: - container_name = self._get_service_attributes(service_config.name).container_name - try: - logs = client.logs(container_name, since=since, until=until) - except HTTPError as exc: - logger.info(f"Got exception while dumping logs of '{container_name}': {exc}") - continue - - if filter_regex: - logs = ( - "\n".join(match[0] for match in re.findall(filter_regex, logs, re.IGNORECASE)) - or f"No matches found in logs based on given filter '{filter_regex}'" - ) - - # Save logs to the directory - file_path = os.path.join( - directory_path, - f"{self._config.address}-{container_name}-log.txt", - ) - with open(file_path, "wb") as file: - file.write(logs) - - def get_filtered_logs( - self, - filter_regex: str, - since: Optional[datetime] = None, - until: Optional[datetime] = None, - unit: Optional[str] = None, - exclude_filter: Optional[str] = None, - priority: Optional[str] = None, - word_count: bool = None, - ) -> str: - client = self._get_docker_client() - filtered_logs = "" - for service_config in self._config.services: - container_name = self._get_service_attributes(service_config.name).container_name - try: - filtered_logs = client.logs(container_name, 
since=since, until=until) - except HTTPError as exc: - logger.info(f"Got exception while dumping logs of '{container_name}': {exc}") - continue - - if exclude_filter: - filtered_logs = filtered_logs.replace(exclude_filter, "") - matches = re.findall(filter_regex, filtered_logs, re.IGNORECASE + re.MULTILINE) - found = list(matches) - - if found: - filtered_logs += f"{container_name}:\n{os.linesep.join(found)}" - - return filtered_logs - - def is_message_in_logs( - self, - message_regex: str, - since: Optional[datetime] = None, - until: Optional[datetime] = None, - unit: Optional[str] = None, - ) -> bool: - client = self._get_docker_client() - for service_config in self._config.services: - container_name = self._get_service_attributes(service_config.name).container_name - try: - logs = client.logs(container_name, since=since, until=until) - except HTTPError as exc: - logger.info(f"Got exception while dumping logs of '{container_name}': {exc}") - continue - - if message_regex: - matches = re.findall(message_regex, logs, re.IGNORECASE) - if matches: - return True - - return False - - def _get_service_attributes(self, service_name) -> ServiceAttributes: - service_config = self.get_service_config(service_name) - return ServiceAttributes.parse(service_config.attributes) - - def _get_docker_client(self) -> docker.APIClient: - docker_endpoint = HostAttributes.parse(self._config.attributes).docker_endpoint - - if not docker_endpoint: - # Use default docker client that talks to unix socket - return docker.APIClient() - - # Otherwise use docker client that talks to specified endpoint - return docker.APIClient(base_url=docker_endpoint) - - def _get_container_by_name(self, container_name: str) -> dict[str, Any]: - client = self._get_docker_client() - containers = client.containers(all=True) - - for container in containers: - # Names in local docker environment are prefixed with / - clean_names = set(name.strip("/") for name in container["Names"]) - if container_name in clean_names: - return container - return None - - def _wait_for_container_to_be_in_state(self, container_name: str, expected_state: str, timeout: int) -> None: - iterations = 10 - iteration_wait_time = timeout / iterations - - # To speed things up, we break timeout in smaller iterations and check container state - # several times. 
This way waiting stops as soon as container reaches the expected state - for _ in range(iterations): - state = self._get_container_state(container_name) - - if state == expected_state: - return - time.sleep(iteration_wait_time) - - raise RuntimeError(f"Container {container_name} is not in {expected_state} state.") - - def _get_container_state(self, container_name: str) -> str: - container = self._get_container_by_name(container_name) - logger.debug(f"Current container state\n:{json.dumps(container, indent=2)}") - - return container.get("State", None) diff --git a/src/frostfs_testlib/hosting/hosting.py b/src/frostfs_testlib/hosting/hosting.py deleted file mode 100644 index eddf03c..0000000 --- a/src/frostfs_testlib/hosting/hosting.py +++ /dev/null @@ -1,107 +0,0 @@ -import re -from typing import Any - -from frostfs_testlib.hosting.config import HostConfig, ServiceConfig -from frostfs_testlib.hosting.interfaces import Host -from frostfs_testlib.plugins import load_plugin - - -class Hosting: - """Hosting manages infrastructure where frostFS runs (machines and frostFS services).""" - - _hosts: list[Host] - _host_by_address: dict[str, Host] - _host_by_service_name: dict[str, Host] - - @property - def hosts(self) -> list[Host]: - """Returns all hosts registered in the hosting. - - Returns: - List of hosts. - """ - return self._hosts - - def configure(self, config: dict[str, Any]) -> None: - """Configures hosts from specified config. - - All existing hosts will be removed from the hosting. - - Args: - config: Dictionary with hosting configuration. - """ - hosts = [] - host_by_address = {} - host_by_service_name = {} - - host_configs = [HostConfig(**host_config) for host_config in config["hosts"]] - for host_config in host_configs: - host_class = load_plugin("frostfs.testlib.hosting", host_config.plugin_name) - host = host_class(host_config) - - hosts.append(host) - host_by_address[host_config.address] = host - - for service_config in host_config.services: - host_by_service_name[service_config.name] = host - - self._hosts = hosts - self._host_by_address = host_by_address - self._host_by_service_name = host_by_service_name - - def get_host_by_address(self, host_address: str) -> Host: - """Returns host with specified address. - - Args: - host_address: Address of the host. - - Returns: - Host that manages machine with specified address. - """ - host = self._host_by_address.get(host_address) - if host is None: - raise ValueError(f"Unknown host address: '{host_address}'") - return host - - def get_host_by_service(self, service_name: str) -> Host: - """Returns host where service with specified name is located. - - Args: - service_name: Name of the service. - - Returns: - Host that manages machine where service is located. - """ - host = self._host_by_service_name.get(service_name) - if host is None: - raise ValueError(f"Unknown service name: '{service_name}'") - return host - - def get_service_config(self, service_name: str) -> ServiceConfig: - """Returns config of service with specified name. - - Args: - service_name: Name of the service. - - Returns: - Config of the service. - """ - host = self.get_host_by_service(service_name) - return host.get_service_config(service_name) - - def find_service_configs(self, service_name_pattern: str) -> list[ServiceConfig]: - """Finds configs of services where service name matches specified regular expression. - - Args: - service_name_pattern - regular expression for service names. - - Returns: - List of service configs matched with the regular expression. 
- """ - service_configs = [ - service_config - for host in self.hosts - for service_config in host.config.services - if re.match(service_name_pattern, service_config.name) - ] - return service_configs diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py deleted file mode 100644 index a41161c..0000000 --- a/src/frostfs_testlib/hosting/interfaces.py +++ /dev/null @@ -1,398 +0,0 @@ -from abc import ABC, abstractmethod -from datetime import datetime -from typing import Optional - -from frostfs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig -from frostfs_testlib.shell.interfaces import Shell -from frostfs_testlib.testing.readable import HumanReadableEnum -from frostfs_testlib.testing.test_control import retry - - -class HostStatus(HumanReadableEnum): - ONLINE = "Online" - OFFLINE = "Offline" - UNKNOWN = "Unknown" - - -class DiskInfo(dict): - """Dict wrapper for disk_info for disk management commands.""" - - -class Host(ABC): - """Interface of a host machine where frostFS services are running. - - Allows to manage the machine and frostFS services that are hosted on it. - """ - - def __init__(self, config: HostConfig) -> None: - self._config = config - self._service_config_by_name = {service_config.name: service_config for service_config in config.services} - self._cli_config_by_name = {cli_config.name: cli_config for cli_config in config.clis} - - def __repr__(self) -> str: - return self.config.address - - @property - def config(self) -> HostConfig: - """Returns config of the host. - - Returns: - Config of this host. - """ - return self._config - - def get_service_config(self, service_name: str) -> ServiceConfig: - """Returns config of service with specified name. - - The service must be hosted on this host. - - Args: - service_name: Name of the service. - - Returns: - Config of the service. - """ - service_config = self._service_config_by_name.get(service_name) - if service_config is None: - raise ValueError(f"Unknown service name: '{service_name}'") - return service_config - - def get_cli_config(self, cli_name: str, allow_empty: bool = False) -> CLIConfig: - """Returns config of CLI tool with specified name. - - The CLI must be located on this host. - - Args: - cli_name: Name of the CLI tool. - - Returns: - Config of the CLI tool. - """ - cli_config = self._cli_config_by_name.get(cli_name) - if cli_config is None and not allow_empty: - raise ValueError(f"Unknown CLI name: '{cli_name}'") - return cli_config - - @abstractmethod - def get_shell(self, sudo: bool = True) -> Shell: - """Returns shell to this host. - - Args: - sudo: if True, run all commands in shell with elevated rights - - Returns: - Shell that executes commands on this host. - """ - - @abstractmethod - def start_host(self) -> None: - """Starts the host machine.""" - - @abstractmethod - def get_host_status(self) -> HostStatus: - """Check host status.""" - - @abstractmethod - def stop_host(self, mode: str) -> None: - """Stops the host machine. - - Args: - mode: Specifies mode how host should be stopped. Mode might be host-specific. - """ - - @abstractmethod - def start_service(self, service_name: str) -> None: - """Starts the service with specified name and waits until it starts. - - The service must be hosted on this host. - - Args: - service_name: Name of the service to start. - """ - - @abstractmethod - def stop_service(self, service_name: str) -> None: - """Stops the service with specified name and waits until it stops. - - The service must be hosted on this host. 
- - Args: - service_name: Name of the service to stop. - """ - - @abstractmethod - def send_signal_to_service(self, service_name: str, signal: str) -> None: - """Send signal to service with specified name using kill - - - The service must be hosted on this host. - - Args: - service_name: Name of the service to stop. - signal: signal name. See kill -l to all names - """ - - @abstractmethod - def mask_service(self, service_name: str) -> None: - """Prevent the service from start by any activity by masking it. - - The service must be hosted on this host. - - Args: - service_name: Name of the service to mask. - """ - - @abstractmethod - def unmask_service(self, service_name: str) -> None: - """Allow the service to start by any activity by unmasking it. - - The service must be hosted on this host. - - Args: - service_name: Name of the service to unmask. - """ - - @abstractmethod - def restart_service(self, service_name: str) -> None: - """Restarts the service with specified name and waits until it starts. - The service must be hosted on this host. - Args: - service_name: Name of the service to restart. - """ - - @abstractmethod - def get_data_directory(self, service_name: str) -> str: - """ - Getting path to data directory on node for further usage - (example: list databases pilorama.db) - - Args: - service_name: Name of storage node service. - """ - - @abstractmethod - def wait_success_suspend_process(self, process_name: str) -> None: - """Search for a service ID by its name and stop the process - Args: - process_name: Name - """ - - @abstractmethod - def wait_success_resume_process(self, process_name: str) -> None: - """Search for a service by its ID and start the process - Args: - process_name: Name - """ - - @abstractmethod - def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: - """Erases all data of the storage node with specified name. - - Args: - service_name: Name of storage node service. - cache_only: To delete cache only. - """ - - @abstractmethod - def wipefs_storage_node_data(self, service_name: str) -> None: - """Erases all data of the storage node with specified name. - - Args: - service_name: Name of storage node service. - """ - - def finish_wipefs(self, service_name: str) -> None: - """Erases all data of the storage node with specified name. - - Args: - service_name: Name of storage node service. - """ - - @abstractmethod - def delete_fstree(self, service_name: str) -> None: - """ - Deletes all fstrees in the node. - - Args: - service_name: Name of storage node service. - - """ - - @abstractmethod - def delete_metabase(self, service_name: str) -> None: - """ - Deletes all metabase*.db in the node. - - Args: - service_name: Name of storage node service. - - """ - - @abstractmethod - def delete_write_cache(self, service_name: str) -> None: - """ - Deletes all write_cache in the node. - - Args: - service_name: Name of storage node service. - - """ - - @abstractmethod - def delete_blobovnicza(self, service_name: str) -> None: - """ - Deletes all blobovniczas in the node. - - Args: - service_name: Name of storage node service. 
- - """ - - @abstractmethod - def delete_file(self, file_path: str) -> None: - """ - Deletes file with provided file path - - Args: - file_path: full path to the file to delete - - """ - - @abstractmethod - def is_file_exist(self, file_path: str) -> bool: - """ - Checks if file exist - - Args: - file_path: full path to the file to check - - """ - - @abstractmethod - def detach_disk(self, device: str) -> DiskInfo: - """Detaches disk device to simulate disk offline/failover scenario. - - Args: - device: Device name to detach. - - Returns: - internal service disk info related to host plugin (i.e. volume id for cloud devices), - which may be used to identify or re-attach existing volume back. - """ - - @abstractmethod - def attach_disk(self, device: str, disk_info: DiskInfo) -> None: - """Attaches disk device back. - - Args: - device: Device name to attach. - service_info: any info required for host plugin to identify/attach disk. - """ - - @abstractmethod - def is_disk_attached(self, device: str, disk_info: DiskInfo) -> bool: - """Checks if disk device is attached. - - Args: - device: Device name to check. - service_info: any info required for host plugin to identify disk. - - Returns: - True if attached. - False if detached. - """ - - @abstractmethod - def dump_logs( - self, - directory_path: str, - since: Optional[datetime] = None, - until: Optional[datetime] = None, - filter_regex: Optional[str] = None, - ) -> None: - """Dumps logs of all services on the host to specified directory. - - Args: - directory_path: Path to the directory where logs should be stored. - since: If set, limits the time from which logs should be collected. Must be in UTC. - until: If set, limits the time until which logs should be collected. Must be in UTC. - filter_regex: regex to filter output - """ - - @abstractmethod - def get_filtered_logs( - self, - filter_regex: str, - since: Optional[datetime] = None, - until: Optional[datetime] = None, - unit: Optional[str] = None, - exclude_filter: Optional[str] = None, - priority: Optional[str] = None, - word_count: bool = None, - ) -> str: - """Get logs from host filtered by regex. - - Args: - filter_regex: regex filter for logs. - since: If set, limits the time from which logs should be collected. Must be in UTC. - until: If set, limits the time until which logs should be collected. Must be in UTC. - unit: required unit. - priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher. - For example, if we specify the -p 2 option, journalctl will show all messages with levels 2, 1 and 0. - word_count: output type, expected values: lines, bytes, json - - Returns: - Found entries as str if any found. - Empty string otherwise. - """ - - @abstractmethod - def is_message_in_logs( - self, - message_regex: str, - since: Optional[datetime] = None, - until: Optional[datetime] = None, - unit: Optional[str] = None, - ) -> bool: - """Checks logs on host for specified message regex. - - Args: - message_regex: message to find. - since: If set, limits the time from which logs should be collected. Must be in UTC. - until: If set, limits the time until which logs should be collected. Must be in UTC. - - Returns: - True if message found in logs in the given time frame. - False otherwise. - """ - - @abstractmethod - def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None: - """ - Waites for service to be in specified state. - - Args: - systemd_service_name: Service to wait state of. 
- expected_state: State to wait for - timeout: Seconds to wait - - """ - - def down_interface(self, interface: str) -> None: - shell = self.get_shell() - shell.exec(f"ip link set {interface} down") - - def up_interface(self, interface: str) -> None: - shell = self.get_shell() - shell.exec(f"ip link set {interface} up") - - def check_state(self, interface: str) -> str: - shell = self.get_shell() - return shell.exec(f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'").stdout.strip() - - @retry(max_attempts=5, sleep_interval=5, expected_result="UP") - def check_state_up(self, interface: str) -> str: - return self.check_state(interface=interface) - - @retry(max_attempts=5, sleep_interval=5, expected_result="DOWN") - def check_state_down(self, interface: str) -> str: - return self.check_state(interface=interface) diff --git a/src/frostfs_testlib/load/__init__.py b/src/frostfs_testlib/load/__init__.py deleted file mode 100644 index 8477ee4..0000000 --- a/src/frostfs_testlib/load/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -from frostfs_testlib.load.interfaces.loader import Loader -from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner -from frostfs_testlib.load.load_config import ( - EndpointSelectionStrategy, - K6ProcessAllocationStrategy, - LoadParams, - LoadScenario, - LoadType, - NodesSelectionStrategy, - Preset, - ReadFrom, -) -from frostfs_testlib.load.load_report import LoadReport -from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader -from frostfs_testlib.load.runners import DefaultRunner, LocalRunner, S3LocalRunner diff --git a/src/frostfs_testlib/load/interfaces/loader.py b/src/frostfs_testlib/load/interfaces/loader.py deleted file mode 100644 index 2c818d9..0000000 --- a/src/frostfs_testlib/load/interfaces/loader.py +++ /dev/null @@ -1,14 +0,0 @@ -from abc import ABC, abstractmethod - -from frostfs_testlib.shell.interfaces import Shell - - -class Loader(ABC): - @abstractmethod - def get_shell(self) -> Shell: - """Get shell for the loader""" - - @property - @abstractmethod - def ip(self): - """Get address of the loader""" diff --git a/src/frostfs_testlib/load/interfaces/scenario_runner.py b/src/frostfs_testlib/load/interfaces/scenario_runner.py deleted file mode 100644 index c0062a9..0000000 --- a/src/frostfs_testlib/load/interfaces/scenario_runner.py +++ /dev/null @@ -1,55 +0,0 @@ -from abc import ABC, abstractmethod - -from frostfs_testlib.load.interfaces.loader import Loader -from frostfs_testlib.load.k6 import K6 -from frostfs_testlib.load.load_config import LoadParams -from frostfs_testlib.storage.cluster import ClusterNode - - -class ScenarioRunner(ABC): - @abstractmethod - def prepare( - self, - load_params: LoadParams, - cluster_nodes: list[ClusterNode], - nodes_under_load: list[ClusterNode], - k6_dir: str, - ): - """Preparation steps before running the load""" - - @abstractmethod - def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): - """Init K6 instances""" - - @abstractmethod - def get_k6_instances(self) -> list[K6]: - """Get K6 instances""" - - @abstractmethod - def start(self): - """Start K6 instances""" - - @abstractmethod - def stop(self): - """Stop K6 instances""" - - @abstractmethod - def preset(self): - """Run preset for load""" - - @property - @abstractmethod - def is_running(self) -> bool: - """Returns True if load is running at the moment""" - - @abstractmethod - def wait_until_finish(self, soft_timeout: int = 0): - """Wait until load is finished""" - - @abstractmethod - def 
get_results(self) -> dict: - """Get results from K6 run""" - - @abstractmethod - def get_loaders(self) -> list[Loader]: - """Return loaders""" diff --git a/src/frostfs_testlib/load/interfaces/summarized.py b/src/frostfs_testlib/load/interfaces/summarized.py deleted file mode 100644 index 4be33ef..0000000 --- a/src/frostfs_testlib/load/interfaces/summarized.py +++ /dev/null @@ -1,96 +0,0 @@ -from dataclasses import dataclass, field - -from frostfs_testlib.load.load_config import LoadParams, LoadScenario -from frostfs_testlib.load.load_metrics import get_metrics_object - - -@dataclass -class SummarizedErorrs: - total: int = field(default_factory=int) - percent: float = field(default_factory=float) - threshold: float = field(default_factory=float) - by_node: dict[str, int] = field(default_factory=dict) - - def calc_stats(self, operations): - self.total += sum(self.by_node.values()) - - if not operations: - return - - self.percent = self.total / operations * 100 - - -@dataclass -class SummarizedLatencies: - avg: float = field(default_factory=float) - min: float = field(default_factory=float) - max: float = field(default_factory=float) - by_node: dict[str, dict[str, int]] = field(default_factory=dict) - - def calc_stats(self): - if not self.by_node: - return - - avgs = [lt["avg"] for lt in self.by_node.values()] - self.avg = sum(avgs) / len(avgs) - - minimal = [lt["min"] for lt in self.by_node.values()] - self.min = min(minimal) - - maximum = [lt["max"] for lt in self.by_node.values()] - self.max = max(maximum) - - -@dataclass -class SummarizedStats: - threads: int = field(default_factory=int) - requested_rate: int = field(default_factory=int) - operations: int = field(default_factory=int) - rate: float = field(default_factory=float) - throughput: float = field(default_factory=float) - latencies: SummarizedLatencies = field(default_factory=SummarizedLatencies) - errors: SummarizedErorrs = field(default_factory=SummarizedErorrs) - total_bytes: int = field(default_factory=int) - passed: bool = True - - def calc_stats(self): - self.errors.calc_stats(self.operations) - self.latencies.calc_stats() - self.passed = self.errors.percent <= self.errors.threshold - - @staticmethod - def collect(load_params: LoadParams, load_summaries: dict) -> dict[str, "SummarizedStats"]: - if load_params.scenario in [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]: - delete_vus = max(load_params.preallocated_deleters or 0, load_params.max_deleters or 0) - write_vus = max(load_params.preallocated_writers or 0, load_params.max_writers or 0) - read_vus = max(load_params.preallocated_readers or 0, load_params.max_readers or 0) - else: - write_vus = load_params.writers - read_vus = load_params.readers - delete_vus = load_params.deleters - - summarized = { - "Write": SummarizedStats(threads=write_vus, requested_rate=load_params.write_rate), - "Read": SummarizedStats(threads=read_vus, requested_rate=load_params.read_rate), - "Delete": SummarizedStats(threads=delete_vus, requested_rate=load_params.delete_rate), - } - - for node_key, load_summary in load_summaries.items(): - metrics = get_metrics_object(load_params.scenario, load_summary) - for operation in metrics.operations: - target = summarized[operation._NAME] - if not operation.total_iterations: - continue - target.operations += operation.total_iterations - target.rate += operation.rate - target.latencies.by_node[node_key] = operation.latency - target.throughput += operation.throughput - target.errors.threshold = load_params.error_threshold - target.total_bytes += 
operation.total_bytes - if operation.failed_iterations: - target.errors.by_node[node_key] = operation.failed_iterations - - for operation in summarized.values(): - operation.calc_stats() - - return summarized diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py deleted file mode 100644 index 3e62a16..0000000 --- a/src/frostfs_testlib/load/k6.py +++ /dev/null @@ -1,268 +0,0 @@ -import json -import logging -import math -import os -from dataclasses import dataclass -from datetime import datetime -from threading import Event -from time import sleep -from typing import Any -from urllib.parse import urlparse - -from frostfs_testlib import reporter -from frostfs_testlib.credentials.interfaces import User -from frostfs_testlib.load.interfaces.loader import Loader -from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType -from frostfs_testlib.processes.remote_process import RemoteProcess -from frostfs_testlib.resources.common import STORAGE_USER_NAME -from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, K6_TEARDOWN_PERIOD -from frostfs_testlib.shell import Shell -from frostfs_testlib.testing.test_control import wait_for_success - -EXIT_RESULT_CODE = 0 - -logger = logging.getLogger("NeoLogger") - - -@dataclass -class LoadResults: - data_sent: float = 0.0 - data_received: float = 0.0 - read_ops: float = 0.0 - write_ops: float = 0.0 - total_ops: float = 0.0 - - -class K6: - _k6_process: RemoteProcess - - def __init__( - self, - load_params: LoadParams, - endpoints: list[str], - k6_dir: str, - shell: Shell, - loader: Loader, - user: User, - ): - if load_params.scenario is None: - raise RuntimeError("Scenario should not be none") - - self.load_params = load_params - self.endpoints = endpoints - self.loader = loader - self.shell = shell - self.user = user - self.preset_output: str = "" - self.summary_json: str = os.path.join( - self.load_params.working_dir, - f"{self.load_params.load_id}_{self.load_params.scenario.value}_summary.json", - ) - - self._k6_dir: str = k6_dir - - command = ( - f"{self._generate_env_variables()}{self._k6_dir}/k6 run {self._generate_k6_variables()} " - f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js" - ) - remote_user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None - process_id = self.load_params.load_id if self.load_params.scenario != LoadScenario.VERIFY else f"{self.load_params.load_id}_verify" - self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, remote_user, process_id) - - def _get_fill_percents(self): - fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs | grep data").stdout.split("\n") - return [line.split() for line in fill_percents][:-1] - - def check_fill_percent(self): - fill_percents = self._get_fill_percents() - percent_mean = 0 - for line in fill_percents: - percent_mean += float(line[1].split("%")[0]) - percent_mean = percent_mean / len(fill_percents) - logger.info(f"{self.loader.ip} mean fill percent is {percent_mean}") - return percent_mean >= self.load_params.fill_percent - - @property - def process_dir(self) -> str: - return self._k6_process.process_dir - - def preset(self) -> str: - with reporter.step(f"Run preset on loader {self.loader.ip} for endpoints {self.endpoints}"): - preset_grpc = f"{self._k6_dir}/scenarios/preset/preset_grpc.py" - preset_s3 = f"{self._k6_dir}/scenarios/preset/preset_s3.py" - preset_map = { - LoadType.gRPC: preset_grpc, - 
LoadType.S3: preset_s3, - LoadType.HTTP: preset_grpc, - } - - base_args = { - preset_grpc: [ - preset_grpc, - f"--endpoint {','.join(self.endpoints)}", - f"--wallet {self.user.wallet.path} ", - f"--config {self.user.wallet.config_path} ", - ], - preset_s3: [ - preset_s3, - f"--endpoint {','.join(self.endpoints)}", - ], - } - - preset_scenario = preset_map[self.load_params.load_type] - command_args = base_args[preset_scenario].copy() - - command_args += self.load_params.get_preset_arguments() - - command = " ".join(command_args) - result = self.shell.exec(command) - - assert result.return_code == EXIT_RESULT_CODE, f"Return code of preset is not zero: {result.stdout}" - - self.preset_output = result.stdout.strip("\n") - return self.preset_output - - @reporter.step("Generate K6 variables") - def _generate_k6_variables(self) -> str: - env_vars = self.load_params.get_k6_vars() - - env_vars[f"{self.load_params.load_type.value.upper()}_ENDPOINTS"] = ",".join(self.endpoints) - env_vars["SUMMARY_JSON"] = self.summary_json - - reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables") - return " ".join([f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None]) - - @reporter.step("Generate env variables") - def _generate_env_variables(self) -> str: - env_vars = self.load_params.get_env_vars() - if not env_vars: - return "" - reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "ENV variables") - return " ".join([f"{param}='{value}'" for param, value in env_vars.items() if value is not None]) + " " - - def get_start_time(self) -> datetime: - return datetime.fromtimestamp(self._k6_process.start_time()) - - def get_end_time(self) -> datetime: - return datetime.fromtimestamp(self._k6_process.end_time()) - - def start(self) -> None: - with reporter.step(f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"): - self._k6_process.start() - - def wait_until_finished(self, event: Event, soft_timeout: int = 0) -> None: - with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"): - if self.load_params.scenario == LoadScenario.VERIFY: - timeout = self.load_params.verify_time or 0 - else: - timeout = self.load_params.load_time or 0 - - start_time = int(self.get_start_time().timestamp()) - - current_time = int(datetime.utcnow().timestamp()) - working_time = current_time - start_time - remaining_time = timeout - working_time - - setup_teardown_time = ( - int(K6_TEARDOWN_PERIOD) + self.load_params.get_init_time() + int(self.load_params.setup_timeout.replace("s", "").strip()) - ) - remaining_time_including_setup_and_teardown = remaining_time + setup_teardown_time - timeout = remaining_time_including_setup_and_teardown - - if soft_timeout: - timeout = min(timeout, soft_timeout) - - original_timeout = timeout - - timeouts = { - "K6 start time": start_time, - "Current time": current_time, - "K6 working time": working_time, - "Remaining time for load": remaining_time, - "Setup and teardown": setup_teardown_time, - "Remaining time including setup/teardown": remaining_time_including_setup_and_teardown, - "Soft timeout": soft_timeout, - "Selected timeout": original_timeout, - } - - reporter.attach("\n".join([f"{k}: {v}" for k, v in timeouts.items()]), "timeouts.txt") - - min_wait_interval = 10 - wait_interval = min_wait_interval - if self._k6_process is None: - assert "No k6 instances were executed" - - while timeout > 0: - if not 
self.load_params.fill_percent is None: - with reporter.step(f"Check the percentage of filling of all data disks on the node"): - if self.check_fill_percent(): - logger.info(f"Stopping load on because disks is filled more then {self.load_params.fill_percent}%") - event.set() - self.stop() - return - - if event.is_set(): - self.stop() - return - - if not self._k6_process.running(): - return - - remaining_time_hours = f"{timeout//3600}h" if timeout // 3600 != 0 else "" - remaining_time_minutes = f"{timeout//60%60}m" if timeout // 60 % 60 != 0 else "" - logger.info( - f"K6 is running. Remaining time {remaining_time_hours}{remaining_time_minutes}{timeout%60}s. Next check after {wait_interval} seconds..." - ) - sleep(wait_interval) - timeout -= min(timeout, wait_interval) - wait_interval = max( - min(timeout, int(math.log2(timeout + 1)) * 15) - min_wait_interval, - min_wait_interval, - ) - - if not self._k6_process.running(): - return - - self.stop() - if not soft_timeout: - raise TimeoutError(f"Expected K6 to finish after {original_timeout} sec.") - - def get_results(self) -> Any: - with reporter.step(f"Get load results from loader {self.loader.ip} on endpoints {self.endpoints}"): - self.__log_output() - - if not self.summary_json: - return None - - summary_text = self.shell.exec(f"cat {self.summary_json}").stdout - summary_json = json.loads(summary_text) - endpoint = urlparse(self.endpoints[0]).netloc or self.endpoints[0] - allure_filenames = { - K6ProcessAllocationStrategy.PER_LOAD_NODE: f"{self.loader.ip}_{self.load_params.scenario.value}_summary.json", - K6ProcessAllocationStrategy.PER_ENDPOINT: f"{self.loader.ip}_{self.load_params.scenario.value}_{endpoint}_summary.json", - } - allure_filename = allure_filenames[self.load_params.k6_process_allocation_strategy] - - reporter.attach(summary_text, allure_filename) - return summary_json - - def stop(self) -> None: - with reporter.step(f"Stop load from loader {self.loader.ip} on endpoints {self.endpoints}"): - if self.is_running(): - self._k6_process.stop() - - self._wait_until_process_end() - - def is_running(self) -> bool: - if self._k6_process: - return self._k6_process.running() - return False - - @reporter.step("Wait until K6 process end") - @wait_for_success(K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout") - def _wait_until_process_end(self): - return self._k6_process.running() - - def __log_output(self) -> None: - reporter.attach(self._k6_process.stdout(full=True), "K6 stdout") - reporter.attach(f"{self._k6_process.process_dir}/stderr", "K6 stderr path") diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py deleted file mode 100644 index 3830203..0000000 --- a/src/frostfs_testlib/load/load_config.py +++ /dev/null @@ -1,491 +0,0 @@ -import math -import os -from dataclasses import dataclass, field, fields, is_dataclass -from enum import Enum -from types import MappingProxyType -from typing import Any, Callable, Optional, get_args - -from frostfs_testlib.utils.converting_utils import calc_unit - - -def convert_time_to_seconds(time: int | str | None) -> int: - if time is None: - return None - if str(time).isdigit(): - seconds = int(time) - else: - days, hours, minutes = 0, 0, 0 - if "d" in time: - days, time = time.split("d") - if "h" in time: - hours, time = time.split("h") - if "min" in time: - minutes = time.replace("min", "") - seconds = int(days) * 86400 + int(hours) * 3600 + int(minutes) * 60 - return seconds - - -def force_list(input: str | list[str]): - if 
input is None: - return None - - if isinstance(input, list): - return list(map(str.strip, input)) - - return [input.strip()] - - -class LoadType(Enum): - gRPC = "grpc" - S3 = "s3" - HTTP = "http" - - -class LoadScenario(Enum): - gRPC = "grpc" - gRPC_CAR = "grpc_car" - S3 = "s3" - S3_CAR = "s3_car" - S3_MULTIPART = "s3_multipart" - S3_LOCAL = "s3local" - HTTP = "http" - VERIFY = "verify" - LOCAL = "local" - - -class ReadFrom(Enum): - REGISTRY = "registry" - PRESET = "preset" - MANUAL = "manual" - - -all_load_scenarios = [ - LoadScenario.gRPC, - LoadScenario.S3, - LoadScenario.HTTP, - LoadScenario.S3_CAR, - LoadScenario.gRPC_CAR, - LoadScenario.LOCAL, - LoadScenario.S3_MULTIPART, - LoadScenario.S3_LOCAL, -] -all_scenarios = all_load_scenarios.copy() + [LoadScenario.VERIFY] - -constant_vus_scenarios = [ - LoadScenario.gRPC, - LoadScenario.S3, - LoadScenario.HTTP, - LoadScenario.LOCAL, - LoadScenario.S3_MULTIPART, - LoadScenario.S3_LOCAL, -] -constant_arrival_rate_scenarios = [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR] - -grpc_preset_scenarios = [ - LoadScenario.gRPC, - LoadScenario.HTTP, - LoadScenario.gRPC_CAR, - LoadScenario.LOCAL, -] -s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART, LoadScenario.S3_LOCAL] - - -@dataclass -class MetaField: - name: str - metadata: MappingProxyType - value: Any - - -def metadata_field( - applicable_scenarios: list[LoadScenario], - preset_param: Optional[str] = None, - scenario_variable: Optional[str] = None, - string_repr: Optional[bool] = True, - distributed: Optional[bool] = False, - formatter: Optional[Callable] = None, - env_variable: Optional[str] = None, -): - return field( - default=None, - metadata={ - "applicable_scenarios": applicable_scenarios, - "preset_argument": preset_param, - "scenario_variable": scenario_variable, - "string_repr": string_repr, - "distributed": distributed, - "formatter": formatter, - "env_variable": env_variable, - }, - ) - - -class NodesSelectionStrategy(Enum): - # Select ONE random node from cluster nodes. - RANDOM_SINGLE = "RANDOM_SINGLE" - # Select All nodes. - ALL = "ALL" - # Select All nodes except node under test (useful for failover). This is DEFAULT one - ALL_EXCEPT_UNDER_TEST = "ALL_EXCEPT_UNDER_TEST" - # Select ONE random node except under test (useful for failover). - RANDOM_SINGLE_EXCEPT_UNDER_TEST = "RANDOM_SINGLE_EXCEPT_UNDER_TEST" - # Select node under test - NODE_UNDER_TEST = "NODE_UNDER_TEST" - - -class EndpointSelectionStrategy(Enum): - """Enum which defines which endpoint to select from each storage node""" - - # Select All endpoints. - ALL = "ALL" - # Select first endpoint from node - FIRST = "FIRST" - - -class K6ProcessAllocationStrategy(Enum): - """Enum which defines how K6 processes should be allocated""" - - # Each load node will get one k6 process with all endpoints (Default) - PER_LOAD_NODE = "PER_LOAD_NODE" - # Each endpoint will get it's own k6 process regardless of number of load nodes. 
- # If there is not enough load nodes, some nodes may have multiple k6 processes - PER_ENDPOINT = "PER_ENDPOINT" - - -class MetaConfig: - def _get_field_formatter(self, field_name: str) -> Callable | None: - data_fields = fields(self) - formatters = [ - field.metadata["formatter"] - for field in data_fields - if field.name == field_name and "formatter" in field.metadata and field.metadata["formatter"] != None - ] - if formatters: - return formatters[0] - - return None - - def __setattr__(self, field_name, value): - formatter = self._get_field_formatter(field_name) - if formatter: - value = formatter(value) - - super().__setattr__(field_name, value) - - -@dataclass -class Preset(MetaConfig): - # ------ COMMON ------ - # Amount of objects which should be created - objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None, False) - # Preset json. Filled automatically. - pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False) - # Workers count for preset - workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False) - # TODO: Deprecated. Acl for container/buckets - acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False) - # APE rule for containers instead of deprecated ACL - rule: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "rule", None, False, formatter=force_list) - - # ------ GRPC ------ - # Amount of containers which should be created - containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None, False) - # Container placement policy for containers for gRPC - container_placement_policy: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "policy", None, False, formatter=force_list) - # Number of retries for creation of container - container_creation_retry: Optional[int] = metadata_field(grpc_preset_scenarios, "retry", None, False) - - # ------ S3 ------ - # Amount of buckets which should be created - buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None, False) - # S3 region (AKA placement policy for S3 buckets) - s3_location: Optional[list[str]] = metadata_field(s3_preset_scenarios, "location", None, False, formatter=force_list) - - # Delay between containers creation and object upload for preset - object_upload_delay: Optional[int] = metadata_field(all_load_scenarios, "sleep", None, False) - - # Flag to control preset erorrs - ignore_errors: Optional[bool] = metadata_field(all_load_scenarios, "ignore-errors", None, False) - - # Flag to ensure created containers store data on local endpoints - local: Optional[bool] = metadata_field(grpc_preset_scenarios, "local", None, False) - - -@dataclass -class PrometheusParams(MetaConfig): - # Prometheus server URL - server_url: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_SERVER_URL", string_repr=False) - # Prometheus trend stats - trend_stats: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_TREND_STATS", string_repr=False) - # Additional tags - metrics_tags: Optional[str] = metadata_field(all_load_scenarios, None, "METRIC_TAGS", False) - - -@dataclass -class LoadParams(MetaConfig): - # ------- CONTROL PARAMS ------- - # Load type can be gRPC, HTTP, S3. - load_type: LoadType - # Load scenario from k6 scenarios - scenario: Optional[LoadScenario] = None - # Strategy to select nodes under load. See NodesSelectionStrategy class for more details. 
- # default is ALL_EXCEPT_UNDER_TEST - nodes_selection_strategy: Optional[NodesSelectionStrategy] = None - # Strategy which defines which endpoint to select from each storage node - endpoint_selection_strategy: Optional[EndpointSelectionStrategy] = None - # Strategy which defines how K6 processes should be allocated - k6_process_allocation_strategy: Optional[K6ProcessAllocationStrategy] = None - # Set to true in order to verify uploaded objects after K6 load finish. Default is True. - verify: Optional[bool] = None - # Just id for load so distinct it between runs. Filled automatically. - load_id: Optional[str] = None - # Acceptable number of load errors in % - # 100 means 100% errors allowed - # 1.5 means 1.5% errors allowed - # 0 means no errors allowed - error_threshold: Optional[float] = None - # Working directory - working_dir: Optional[str] = None - # Preset for the k6 run - preset: Optional[Preset] = None - # K6 download url - k6_url: Optional[str] = None - # Requests module url - requests_module_url: Optional[str] = None - # aws cli download url - awscli_url: Optional[str] = None - # No ssl verification flag - no_verify_ssl: Optional[bool] = metadata_field( - [ - LoadScenario.S3, - LoadScenario.S3_CAR, - LoadScenario.S3_MULTIPART, - LoadScenario.S3_LOCAL, - LoadScenario.VERIFY, - LoadScenario.HTTP, - ], - "no-verify-ssl", - "NO_VERIFY_SSL", - False, - ) - # Percentage of filling of all data disks on all nodes - fill_percent: Optional[float] = None - # if specified, max payload size in GB of the storage engine. If the storage engine is already full, no new objects will be saved. - max_total_size_gb: Optional[float] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "MAX_TOTAL_SIZE_GB") - # if set, the payload is generated on the fly and is not read into memory fully. - streaming: Optional[int] = metadata_field(all_load_scenarios, None, "STREAMING", False) - # Output format - output: Optional[str] = metadata_field(all_load_scenarios, None, "K6_OUT", False) - # Prometheus params - prometheus: Optional[PrometheusParams] = None - - # ------- COMMON SCENARIO PARAMS ------- - # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value. - load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION", False, formatter=convert_time_to_seconds) - # Object size in KB for load and preset. - object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False) - # For read operations, controls from which set get objects to read - read_from: Optional[ReadFrom] = None - # For read operations done from REGISTRY, controls delay which object should live before it will be used for read operation - read_age: Optional[int] = metadata_field(all_load_scenarios, None, "READ_AGE", False) - # Output registry K6 file. Filled automatically. - registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False) - # In case if we want to use custom registry file left from another load run - custom_registry: Optional[str] = None - # In case if we want to use custom registry file left from another load run - force_fresh_registry: Optional[bool] = None - # Specifies the minimum duration of every single execution (i.e. iteration). - # Any iterations that are shorter than this value will cause that VU to - # sleep for the remainder of the time until the specified minimum duration is reached. 
- min_iteration_duration: Optional[str] = metadata_field(all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False) - # Prepare/cut objects locally on client before sending - prepare_locally: Optional[bool] = metadata_field([LoadScenario.gRPC, LoadScenario.gRPC_CAR], None, "PREPARE_LOCALLY", False) - # Specifies K6 setupTimeout time. Currently hardcoded in xk6 as 5 seconds for all scenarios - # https://k6.io/docs/using-k6/k6-options/reference/#setup-timeout - setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT", False) - - # Delay for read operations in case if we read from registry - read_age: Optional[int] = metadata_field(all_load_scenarios, None, "READ_AGE", None, False) - - # Initialization time for each VU for k6 load - vu_init_time: Optional[float] = None - - # ------- CONSTANT VUS SCENARIO PARAMS ------- - # Amount of Writers VU. - writers: Optional[int] = metadata_field(constant_vus_scenarios, None, "WRITERS", True, True) - # Amount of Readers VU. - readers: Optional[int] = metadata_field(constant_vus_scenarios, None, "READERS", True, True) - # Amount of Deleters VU. - deleters: Optional[int] = metadata_field(constant_vus_scenarios, None, "DELETERS", True, True) - - # ------- CONSTANT ARRIVAL RATE SCENARIO PARAMS ------- - # Number of iterations to start during each timeUnit period for write. - write_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "WRITE_RATE", True, True) - - # Number of iterations to start during each timeUnit period for read. - read_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "READ_RATE", True, True) - - # Number of iterations to start during each timeUnit period for delete. - delete_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "DELETE_RATE", True, True) - - # Amount of preAllocatedVUs for write operations. - preallocated_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True, True) - # Amount of maxVUs for write operations. - max_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_WRITERS", False, True) - - # Amount of preAllocatedVUs for read operations. - preallocated_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True, True) - # Amount of maxVUs for read operations. - max_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_READERS", False, True) - - # Amount of preAllocatedVUs for read operations. - preallocated_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True, True) - # Amount of maxVUs for delete operations. - max_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True) - - # Multipart - # Number of parts to upload in parallel - writers_multipart: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITERS_MULTIPART", False, True) - # part size must be greater than (5 MB) - write_object_part_size: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False) - - # Period of time to apply the rate value. - time_unit: Optional[str] = metadata_field(constant_arrival_rate_scenarios, None, "TIME_UNIT", False) - - # ------- VERIFY SCENARIO PARAMS ------- - # Maximum verification time for k6 to verify objects. Default is BACKGROUND_LOAD_MAX_VERIFY_TIME (3600). 
- verify_time: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "TIME_LIMIT", False) - # Amount of Verification VU. - verify_clients: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "CLIENTS", True, False) - - # ------- LOCAL SCENARIO PARAMS ------- - # Config file location (filled automatically) - config_file: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_FILE", False) - # Config directory location (filled automatically) - config_dir: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_DIR", False) - - def set_id(self, load_id): - self.load_id = load_id - - if self.read_from == ReadFrom.REGISTRY: - self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt") - - # For now it's okay to have it this way - if self.custom_registry is not None: - self.registry_file = self.custom_registry - - if self.read_from == ReadFrom.PRESET: - self.registry_file = None - - if self.preset: - self.preset.pregen_json = os.path.join(self.working_dir, f"{load_id}_prepare.json") - - def get_k6_vars(self): - env_vars = { - meta_field.metadata["scenario_variable"]: meta_field.value - for meta_field in self._get_meta_fields(self) - if self.scenario in meta_field.metadata["applicable_scenarios"] - and meta_field.metadata["scenario_variable"] - and meta_field.value is not None - } - - return env_vars - - def get_env_vars(self): - env_vars = { - meta_field.metadata["env_variable"]: meta_field.value - for meta_field in self._get_meta_fields(self) - if self.scenario in meta_field.metadata["applicable_scenarios"] - and meta_field.metadata["env_variable"] - and meta_field.value is not None - } - - return env_vars - - def __post_init__(self): - default_scenario_map = { - LoadType.gRPC: LoadScenario.gRPC, - LoadType.HTTP: LoadScenario.HTTP, - LoadType.S3: LoadScenario.S3, - } - - if self.scenario is None: - self.scenario = default_scenario_map[self.load_type] - - def get_preset_arguments(self): - command_args = [ - self._get_preset_argument(meta_field) - for meta_field in self._get_meta_fields(self) - if self.scenario in meta_field.metadata["applicable_scenarios"] - and meta_field.metadata["preset_argument"] - and meta_field.value is not None - and self._get_preset_argument(meta_field) - ] - - return command_args - - def get_init_time(self) -> int: - return math.ceil(self._get_total_vus() * self.vu_init_time) - - def _get_total_vus(self) -> int: - vu_fields = ["writers", "preallocated_writers", "readers", "preallocated_readers"] - data_fields = [getattr(self, field.name) or 0 for field in fields(self) if field.name in vu_fields] - return sum(data_fields) - - def _get_applicable_fields(self): - applicable_fields = [ - meta_field - for meta_field in self._get_meta_fields(self) - if self.scenario in meta_field.metadata["applicable_scenarios"] and meta_field.value - ] - - return applicable_fields - - @staticmethod - def _get_preset_argument(meta_field: MetaField) -> str: - if isinstance(meta_field.value, bool): - # For preset calls, bool values are passed with just -- if the value is True - return f"--{meta_field.metadata['preset_argument']}" if meta_field.value else "" - - if isinstance(meta_field.value, list): - return ( - " ".join(f"--{meta_field.metadata['preset_argument']} '{value}'" for value in meta_field.value) if meta_field.value else "" - ) - - return f"--{meta_field.metadata['preset_argument']} '{meta_field.value}'" - - @staticmethod - def _get_meta_fields(instance) -> list[MetaField]: - 
data_fields = fields(instance) - - fields_with_data = [ - MetaField(field.name, field.metadata, getattr(instance, field.name)) - for field in data_fields - if field.metadata and getattr(instance, field.name) is not None - ] - - for field in data_fields: - actual_field_type = get_args(field.type)[0] if len(get_args(field.type)) else get_args(field.type) - if is_dataclass(actual_field_type) and getattr(instance, field.name): - fields_with_data += LoadParams._get_meta_fields(getattr(instance, field.name)) - - return fields_with_data or [] - - def __str__(self) -> str: - load_type_str = self.scenario.value if self.scenario else self.load_type.value - # TODO: migrate load_params defaults to testlib - if self.object_size is not None: - size, unit = calc_unit(self.object_size, 1) - static_params = [f"{load_type_str} {size:.4g} {unit}"] - else: - static_params = [f"{load_type_str}"] - - dynamic_params = [ - f"{meta_field.name}={meta_field.value}" for meta_field in self._get_applicable_fields() if meta_field.metadata["string_repr"] - ] - params = ", ".join(static_params + dynamic_params) - - return params - - def __repr__(self) -> str: - return self.__str__() diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py deleted file mode 100644 index 035ce8b..0000000 --- a/src/frostfs_testlib/load/load_metrics.py +++ /dev/null @@ -1,243 +0,0 @@ -from abc import ABC -from typing import Any, Optional - -from frostfs_testlib.load.load_config import LoadScenario - - -class OperationMetric(ABC): - _NAME = "" - _SUCCESS = "" - _ERRORS = "" - _THROUGHPUT = "" - _LATENCY = "" - - def __init__(self, summary) -> None: - self.summary = summary - self.metrics = summary["metrics"] - - @property - def total_iterations(self) -> int: - return self._get_metric(self._SUCCESS) + self._get_metric(self._ERRORS) - - @property - def success_iterations(self) -> int: - return self._get_metric(self._SUCCESS) - - @property - def latency(self) -> dict: - return self._get_metric(self._LATENCY) - - @property - def rate(self) -> float: - return self._get_metric_rate(self._SUCCESS) - - @property - def failed_iterations(self) -> int: - return self._get_metric(self._ERRORS) - - @property - def throughput(self) -> float: - return self._get_metric_rate(self._THROUGHPUT) - - @property - def total_bytes(self) -> float: - return self._get_metric(self._THROUGHPUT) - - def _get_metric(self, metric: str) -> int: - metrics_method_map = { - "counter": self._get_counter_metric, - "gauge": self._get_gauge_metric, - "trend": self._get_trend_metrics, - } - - if metric not in self.metrics: - return 0 - - metric = self.metrics[metric] - metric_type = metric["type"] - if metric_type not in metrics_method_map: - raise Exception(f"Unsupported metric type: {metric_type}, supported: {metrics_method_map.keys()}") - - return metrics_method_map[metric_type](metric) - - def _get_metric_rate(self, metric: str) -> int: - metrics_method_map = {"counter": self._get_counter_metric_rate} - - if metric not in self.metrics: - return 0 - - metric = self.metrics[metric] - metric_type = metric["type"] - if metric_type not in metrics_method_map: - raise Exception(f"Unsupported rate metric type: {metric_type}, supported: {metrics_method_map.keys()}") - - return metrics_method_map[metric_type](metric) - - def _get_counter_metric_rate(self, metric: str) -> int: - return metric["values"]["rate"] - - def _get_counter_metric(self, metric: str) -> int: - return metric["values"]["count"] - - def _get_gauge_metric(self, metric: str) -> int: - 
return metric["values"]["value"] - - def _get_trend_metrics(self, metric: str) -> int: - return metric["values"] - - -class WriteOperationMetric(OperationMetric): - _NAME = "Write" - _SUCCESS = "" - _ERRORS = "" - _THROUGHPUT = "data_sent" - _LATENCY = "" - - -class ReadOperationMetric(OperationMetric): - _NAME = "Read" - _SUCCESS = "" - _ERRORS = "" - _THROUGHPUT = "data_received" - _LATENCY = "" - - -class DeleteOperationMetric(OperationMetric): - _NAME = "Delete" - _SUCCESS = "" - _ERRORS = "" - _THROUGHPUT = "" - _LATENCY = "" - - -class GrpcWriteOperationMetric(WriteOperationMetric): - _SUCCESS = "frostfs_obj_put_success" - _ERRORS = "frostfs_obj_put_fails" - _LATENCY = "frostfs_obj_put_duration" - - -class GrpcReadOperationMetric(ReadOperationMetric): - _SUCCESS = "frostfs_obj_get_success" - _ERRORS = "frostfs_obj_get_fails" - _LATENCY = "frostfs_obj_get_duration" - - -class GrpcDeleteOperationMetric(DeleteOperationMetric): - _SUCCESS = "frostfs_obj_delete_success" - _ERRORS = "frostfs_obj_delete_fails" - _LATENCY = "frostfs_obj_delete_duration" - - -class S3WriteOperationMetric(WriteOperationMetric): - _SUCCESS = "aws_obj_put_success" - _ERRORS = "aws_obj_put_fails" - _LATENCY = "aws_obj_put_duration" - - -class S3ReadOperationMetric(ReadOperationMetric): - _SUCCESS = "aws_obj_get_success" - _ERRORS = "aws_obj_get_fails" - _LATENCY = "aws_obj_get_duration" - - -class S3DeleteOperationMetric(DeleteOperationMetric): - _SUCCESS = "aws_obj_delete_success" - _ERRORS = "aws_obj_delete_fails" - _LATENCY = "aws_obj_delete_duration" - - -class S3LocalWriteOperationMetric(WriteOperationMetric): - _SUCCESS = "s3local_obj_put_success" - _ERRORS = "s3local_obj_put_fails" - _LATENCY = "s3local_obj_put_duration" - - -class S3LocalReadOperationMetric(ReadOperationMetric): - _SUCCESS = "s3local_obj_get_success" - _ERRORS = "s3local_obj_get_fails" - _LATENCY = "s3local_obj_get_duration" - - -class LocalWriteOperationMetric(WriteOperationMetric): - _SUCCESS = "local_obj_put_success" - _ERRORS = "local_obj_put_fails" - _LATENCY = "local_obj_put_duration" - - -class LocalReadOperationMetric(ReadOperationMetric): - _SUCCESS = "local_obj_get_success" - _ERRORS = "local_obj_get_fails" - - -class LocalDeleteOperationMetric(DeleteOperationMetric): - _SUCCESS = "local_obj_delete_success" - _ERRORS = "local_obj_delete_fails" - - -class VerifyReadOperationMetric(ReadOperationMetric): - _SUCCESS = "verified_obj" - _ERRORS = "invalid_obj" - - -class MetricsBase(ABC): - def __init__(self) -> None: - self.write: Optional[WriteOperationMetric] = None - self.read: Optional[ReadOperationMetric] = None - self.delete: Optional[DeleteOperationMetric] = None - - @property - def operations(self) -> list[OperationMetric]: - return [metric for metric in [self.write, self.read, self.delete] if metric is not None] - - -class GrpcMetrics(MetricsBase): - def __init__(self, summary) -> None: - super().__init__() - self.write = GrpcWriteOperationMetric(summary) - self.read = GrpcReadOperationMetric(summary) - self.delete = GrpcDeleteOperationMetric(summary) - - -class S3Metrics(MetricsBase): - def __init__(self, summary) -> None: - super().__init__() - self.write = S3WriteOperationMetric(summary) - self.read = S3ReadOperationMetric(summary) - self.delete = S3DeleteOperationMetric(summary) - - -class S3LocalMetrics(MetricsBase): - def __init__(self, summary) -> None: - super().__init__() - self.write = S3LocalWriteOperationMetric(summary) - self.read = S3LocalReadOperationMetric(summary) - - -class LocalMetrics(MetricsBase): - def 
__init__(self, summary) -> None: - super().__init__() - self.write = LocalWriteOperationMetric(summary) - self.read = LocalReadOperationMetric(summary) - self.delete = LocalDeleteOperationMetric(summary) - - -class VerifyMetrics(MetricsBase): - def __init__(self, summary) -> None: - super().__init__() - self.read = VerifyReadOperationMetric(summary) - - -def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> MetricsBase: - class_map = { - LoadScenario.gRPC: GrpcMetrics, - LoadScenario.gRPC_CAR: GrpcMetrics, - LoadScenario.HTTP: GrpcMetrics, - LoadScenario.S3: S3Metrics, - LoadScenario.S3_CAR: S3Metrics, - LoadScenario.S3_MULTIPART: S3Metrics, - LoadScenario.S3_LOCAL: S3LocalMetrics, - LoadScenario.VERIFY: VerifyMetrics, - LoadScenario.LOCAL: LocalMetrics, - } - - return class_map[load_type](summary) diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py deleted file mode 100644 index 2dfac26..0000000 --- a/src/frostfs_testlib/load/load_report.py +++ /dev/null @@ -1,178 +0,0 @@ -from datetime import datetime -from typing import Optional - -import yaml - -from frostfs_testlib.load.interfaces.summarized import SummarizedStats -from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario -from frostfs_testlib.utils.converting_utils import calc_unit - - -class LoadReport: - def __init__(self, load_test) -> None: - self.load_test = load_test - # List of load summaries dict - self.load_summaries_list: Optional[list[dict]] = [] - self.load_params: Optional[LoadParams] = None - self.start_time: Optional[datetime] = None - self.end_time: Optional[datetime] = None - - def set_start_time(self, time: datetime = None): - if time is None: - time = datetime.utcnow() - self.start_time = time - - def set_end_time(self, time: datetime = None): - if time is None: - time = datetime.utcnow() - self.end_time = time - - def add_summaries(self, load_summaries: dict): - self.load_summaries_list.append(load_summaries) - - def set_load_params(self, load_params: LoadParams): - self.load_params = load_params - - def get_report_html(self): - report_sections = [ - [self.load_params, self._get_load_id_section_html], - [self.load_test, self._get_load_params_section_html], - [self.load_summaries_list, self._get_totals_section_html], - [self.end_time, self._get_test_time_html], - ] - - html = "" - for section in report_sections: - if section[0] is not None: - html += section[1]() - - return html - - def _get_load_params_section_html(self) -> str: - params: str = yaml.safe_dump([self.load_test], sort_keys=False, indent=2, explicit_start=True) - params = params.replace("\n", "
").replace(" ", " ") - section_html = f"""

Scenario params

- -
{params}
-
""" - - return section_html - - def _get_load_id_section_html(self) -> str: - section_html = f"""

Load ID: {self.load_params.load_id}

-
""" - - return section_html - - def _get_test_time_html(self) -> str: - if not self.start_time or not self.end_time: - return "" - - html = f"""

Scenario duration

- {self.start_time} - {self.end_time}
-
- """ - - return html - - def _seconds_to_formatted_duration(self, seconds: int) -> str: - """Converts N number of seconds to formatted output ignoring zeroes. - Examples: - 186399 -> "2d3h46m39s" - 86399 -> "23h59m59s" - 86399 -> "23h59m59s" - 3605 -> "1h5s" - 123 -> "2m3s" - """ - units = {"d": 86400, "h": 3600, "m": 60, "s": 1} - parts = [] - remaining = seconds - for divisor in units.values(): - part = remaining // divisor - remaining -= divisor * part - parts.append(part) - - return "".join([f"{val}{unit}" for unit, val in zip(units, parts) if val > 0]) - - def _row(self, caption: str, value: str) -> str: - return f"{caption}{value}" - - def _get_model_string(self): - if self.load_params.min_iteration_duration is not None: - return f"min_iteration_duration={self.load_params.min_iteration_duration}" - - model_map = { - LoadScenario.gRPC: "closed model", - LoadScenario.S3: "closed model", - LoadScenario.S3_MULTIPART: "closed model", - LoadScenario.HTTP: "closed model", - LoadScenario.gRPC_CAR: "open model", - LoadScenario.S3_CAR: "open model", - LoadScenario.LOCAL: "local fill", - LoadScenario.S3_LOCAL: "local fill", - } - - return model_map[self.load_params.scenario] - - def _get_operations_sub_section_html(self, operation_type: str, stats: SummarizedStats): - throughput_html = "" - if stats.throughput > 0: - throughput, unit = calc_unit(stats.throughput) - throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec") - - bytes_html = "" - if stats.total_bytes > 0: - total_bytes, total_bytes_unit = calc_unit(stats.total_bytes) - bytes_html = self._row("Total transferred", f"{total_bytes:.2f} {total_bytes_unit}") - - per_node_errors_html = "" - for node_key, errors in stats.errors.by_node.items(): - if self.load_params.k6_process_allocation_strategy == K6ProcessAllocationStrategy.PER_ENDPOINT: - per_node_errors_html += self._row(f"At {node_key}", errors) - - latency_html = "" - for node_key, latencies in stats.latencies.by_node.items(): - latency_values = "N/A" - if latencies: - latency_values = "" - for param_name, param_val in latencies.items(): - latency_values += f"{param_name}={param_val:.2f}ms " - - latency_html += self._row(f"{operation_type} latency {node_key.split(':')[0]}", latency_values) - - object_size, object_size_unit = calc_unit(self.load_params.object_size, 1) - duration = self._seconds_to_formatted_duration(self.load_params.load_time) - model = self._get_model_string() - requested_rate_str = f"{stats.requested_rate}op/sec" if stats.requested_rate else "" - # write 8KB 15h49m 50op/sec 50th open model/closed model/min_iteration duration=1s - 1.636MB/s 199.57451/s - short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {stats.threads}th {model} - {throughput:.2f}{unit}/s {stats.rate:.2f}/s" - - html = f""" - - - - {self._row("Total operations", stats.operations)} - {self._row("OP/sec", f"{stats.rate:.2f}")} - {bytes_html} - {throughput_html} - {latency_html} - - {per_node_errors_html} - {self._row("Total", f"{stats.errors.total} ({stats.errors.percent:.2f}%)")} - {self._row("Threshold", f"{stats.errors.threshold:.2f}%")} -
{short_summary}
Metrics
Errors


- """ - - return html - - def _get_totals_section_html(self): - html = "" - for i in range(len(self.load_summaries_list)): - html += f"

Load Results for load #{i+1}

" - - summarized = SummarizedStats.collect(self.load_params, self.load_summaries_list[i]) - for operation_type, stats in summarized.items(): - if stats.operations: - html += self._get_operations_sub_section_html(operation_type, stats) - - return html diff --git a/src/frostfs_testlib/load/load_verifiers.py b/src/frostfs_testlib/load/load_verifiers.py deleted file mode 100644 index 97b0ffa..0000000 --- a/src/frostfs_testlib/load/load_verifiers.py +++ /dev/null @@ -1,68 +0,0 @@ -from frostfs_testlib import reporter -from frostfs_testlib.load.interfaces.summarized import SummarizedStats -from frostfs_testlib.load.load_config import LoadParams, LoadScenario -from frostfs_testlib.load.load_metrics import get_metrics_object - - -class LoadVerifier: - def __init__(self, load_params: LoadParams) -> None: - self.load_params = load_params - - def collect_load_issues(self, load_summaries: dict[str, dict]) -> list[str]: - summarized = SummarizedStats.collect(self.load_params, load_summaries) - issues = [] - - for operation_type, stats in summarized.items(): - if stats.threads and not stats.operations: - issues.append(f"No any {operation_type.lower()} operation was performed") - - if stats.errors.percent > stats.errors.threshold: - rate_str = self._get_rate_str(stats.errors.percent) - issues.append(f"{operation_type} errors exceeded threshold: {rate_str} > {stats.errors.threshold}%") - - return issues - - def collect_verify_issues(self, load_summaries, verification_summaries) -> list[str]: - verify_issues: list[str] = [] - for k6_process_label in load_summaries: - with reporter.step(f"Check verify scenario results for {k6_process_label}"): - verify_issues.extend( - self._collect_verify_issues_on_process( - k6_process_label, - load_summaries[k6_process_label], - verification_summaries[k6_process_label], - ) - ) - return verify_issues - - def _get_rate_str(self, rate: float, minimal: float = 0.01) -> str: - return f"{rate:.2f}%" if rate >= minimal else f"~{minimal}%" - - def _collect_verify_issues_on_process(self, label, load_summary, verification_summary) -> list[str]: - issues = [] - - load_metrics = get_metrics_object(self.load_params.scenario, load_summary) - - writers = self.load_params.writers or self.load_params.preallocated_writers or 0 - deleters = self.load_params.deleters or self.load_params.preallocated_deleters or 0 - - delete_success = 0 - - if deleters > 0: - delete_success = load_metrics.delete.success_iterations - - if verification_summary: - verify_metrics = get_metrics_object(LoadScenario.VERIFY, verification_summary) - verified_objects = verify_metrics.read.success_iterations - invalid_objects = verify_metrics.read.failed_iterations - total_left_objects = load_metrics.write.success_iterations - delete_success - - if invalid_objects > 0: - issues.append(f"There were {invalid_objects} verification fails (hash mismatch).") - # Due to interruptions we may see total verified objects to be less than written on writers count - if abs(total_left_objects - verified_objects) > writers: - issues.append( - f"Verified objects mismatch for {label}. Total: {total_left_objects}, Verified: {verified_objects}. Writers: {writers}." 
- ) - - return issues diff --git a/src/frostfs_testlib/load/loaders.py b/src/frostfs_testlib/load/loaders.py deleted file mode 100644 index 1e0e97f..0000000 --- a/src/frostfs_testlib/load/loaders.py +++ /dev/null @@ -1,60 +0,0 @@ -from frostfs_testlib.load.interfaces.loader import Loader -from frostfs_testlib.resources.load_params import ( - LOAD_NODE_SSH_PASSWORD, - LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, - LOAD_NODE_SSH_PRIVATE_KEY_PATH, - LOAD_NODE_SSH_USER, -) -from frostfs_testlib.shell.interfaces import Shell, SshCredentials -from frostfs_testlib.shell.ssh_shell import SSHShell -from frostfs_testlib.storage.cluster import ClusterNode - - -class RemoteLoader(Loader): - def __init__(self, ssh_credentials: SshCredentials, ip: str) -> None: - self.ssh_credentials = ssh_credentials - self._ip = ip - - @property - def ip(self): - return self._ip - - def get_shell(self) -> Shell: - ssh_client = SSHShell( - host=self.ip, - login=self.ssh_credentials.ssh_login, - password=self.ssh_credentials.ssh_password, - private_key_path=self.ssh_credentials.ssh_key_path, - private_key_passphrase=self.ssh_credentials.ssh_key_passphrase, - ) - - return ssh_client - - @classmethod - def from_ip_list(cls, ip_list: list[str]) -> list[Loader]: - loaders: list[Loader] = [] - ssh_credentials = SshCredentials( - LOAD_NODE_SSH_USER, - LOAD_NODE_SSH_PASSWORD, - LOAD_NODE_SSH_PRIVATE_KEY_PATH, - LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, - ) - - for ip in ip_list: - loaders.append(RemoteLoader(ssh_credentials, ip)) - - return loaders - - -class NodeLoader(Loader): - """When ClusterNode is the loader for itself (for Local scenario only).""" - - def __init__(self, cluster_node: ClusterNode) -> None: - self.cluster_node = cluster_node - - def get_shell(self) -> Shell: - return self.cluster_node.host.get_shell() - - @property - def ip(self): - return self.cluster_node.host_ip diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py deleted file mode 100644 index 1ceac09..0000000 --- a/src/frostfs_testlib/load/runners.py +++ /dev/null @@ -1,466 +0,0 @@ -import copy -import itertools -import math -import time -from dataclasses import fields -from threading import Event -from typing import Optional -from urllib.parse import urlparse - -from frostfs_testlib import reporter -from frostfs_testlib.credentials.interfaces import S3Credentials, User -from frostfs_testlib.load.interfaces.loader import Loader -from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner -from frostfs_testlib.load.k6 import K6 -from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType -from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader -from frostfs_testlib.resources import optionals -from frostfs_testlib.resources.common import STORAGE_USER_NAME -from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_VUS_COUNT_DIVISOR, LOAD_NODE_SSH_USER, LOAD_NODES -from frostfs_testlib.shell.command_inspectors import SuInspector -from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput -from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController -from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode -from frostfs_testlib.testing import parallel, run_optionally -from frostfs_testlib.testing.test_control import retry -from frostfs_testlib.utils import datetime_utils -from frostfs_testlib.utils.file_keeper import FileKeeper - 
- -class RunnerBase(ScenarioRunner): - k6_instances: list[K6] - loaders: list[Loader] - - @reporter.step("Run preset on loaders") - def preset(self): - parallel([k6.preset for k6 in self.k6_instances]) - - @reporter.step("Wait until load finish") - def wait_until_finish(self, soft_timeout: int = 0): - event = Event() - parallel([k6.wait_until_finished for k6 in self.k6_instances], event=event, soft_timeout=soft_timeout) - - @property - def is_running(self): - futures = parallel([k6.is_running for k6 in self.k6_instances]) - - return any([future.result() for future in futures]) - - def get_k6_instances(self): - return self.k6_instances - - def get_loaders(self) -> list[Loader]: - return self.loaders - - -class DefaultRunner(RunnerBase): - user: User - - def __init__( - self, - user: User, - load_ip_list: Optional[list[str]] = None, - ) -> None: - if load_ip_list is None: - load_ip_list = LOAD_NODES - self.loaders = RemoteLoader.from_ip_list(load_ip_list) - self.user = user - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Preparation steps") - def prepare( - self, - load_params: LoadParams, - cluster_nodes: list[ClusterNode], - nodes_under_load: list[ClusterNode], - k6_dir: str, - ): - if load_params.force_fresh_registry and load_params.custom_registry: - with reporter.step("Forcing fresh registry files"): - parallel(self._force_fresh_registry, self.loaders, load_params) - - if load_params.load_type != LoadType.S3: - return - - with reporter.step("Init s3 client on loaders"): - s3_credentials = self.user.s3_credentials - parallel(self._aws_configure_on_loader, self.loaders, s3_credentials) - - def _force_fresh_registry(self, loader: Loader, load_params: LoadParams): - with reporter.step(f"Forcing fresh registry on {loader.ip}"): - shell = loader.get_shell() - shell.exec(f"rm -f {load_params.registry_file}") - - def _aws_configure_on_loader( - self, - loader: Loader, - s3_credentials: S3Credentials, - ): - with reporter.step(f"Aws configure on {loader.ip}"): - configure_input = [ - InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=s3_credentials.access_key), - InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=s3_credentials.secret_key), - InteractiveInput(prompt_pattern=r".*", input=""), - InteractiveInput(prompt_pattern=r".*", input=""), - ] - loader.get_shell().exec("aws configure", CommandOptions(interactive_inputs=configure_input)) - - @reporter.step("Init k6 instances") - def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): - self.k6_instances = [] - cycled_loaders = itertools.cycle(self.loaders) - - k6_distribution_count = { - K6ProcessAllocationStrategy.PER_LOAD_NODE: len(self.loaders), - K6ProcessAllocationStrategy.PER_ENDPOINT: len(endpoints), - } - endpoints_generators = { - K6ProcessAllocationStrategy.PER_LOAD_NODE: itertools.cycle([endpoints]), - K6ProcessAllocationStrategy.PER_ENDPOINT: itertools.cycle([[endpoint] for endpoint in endpoints]), - } - k6_processes_count = k6_distribution_count[load_params.k6_process_allocation_strategy] - endpoints_gen = endpoints_generators[load_params.k6_process_allocation_strategy] - - distributed_load_params_list = self._get_distributed_load_params_list(load_params, k6_processes_count) - - futures = parallel( - self._init_k6_instance, - distributed_load_params_list, - loader=cycled_loaders, - endpoints=endpoints_gen, - k6_dir=k6_dir, - ) - self.k6_instances = [future.result() for future in futures] - - def _init_k6_instance(self, 
load_params_for_loader: LoadParams, loader: Loader, endpoints: list[str], k6_dir: str): - shell = loader.get_shell() - with reporter.step(f"Init K6 instance on {loader.ip} for endpoints {endpoints}"): - with reporter.step(f"Make working directory"): - shell.exec(f"sudo mkdir -p {load_params_for_loader.working_dir}") - shell.exec(f"sudo chown {LOAD_NODE_SSH_USER} {load_params_for_loader.working_dir}") - - return K6( - load_params_for_loader, - endpoints, - k6_dir, - shell, - loader, - self.user, - ) - - def _get_distributed_load_params_list(self, original_load_params: LoadParams, workers_count: int) -> list[LoadParams]: - divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR) - distributed_load_params: list[LoadParams] = [] - - for i in range(workers_count): - load_params = copy.deepcopy(original_load_params) - # Append #i here in case if multiple k6 processes goes into same load node - load_params.set_id(f"{load_params.load_id}_{i}") - distributed_load_params.append(load_params) - - load_fields = fields(original_load_params) - - for field in load_fields: - if ( - field.metadata - and original_load_params.scenario in field.metadata["applicable_scenarios"] - and field.metadata["distributed"] - and getattr(original_load_params, field.name) is not None - ): - original_value = getattr(original_load_params, field.name) - distribution = self._get_distribution(math.ceil(original_value / divisor), workers_count) - for i in range(workers_count): - setattr(distributed_load_params[i], field.name, distribution[i]) - - return distributed_load_params - - def _get_distribution(self, clients_count: int, workers_count: int) -> list[int]: - """ - This function will distribute evenly as possible X clients to Y workers. - For example if we have 150 readers (clients) and we want to spread it over 4 load nodes (workers) - this will return [38, 38, 37, 37]. - - Args: - clients_count: amount of things needs to be distributed. - workers_count: amount of workers. - - Returns: - list of distribution. 
- """ - if workers_count < 1: - raise Exception("Workers cannot be less then 1") - - # Amount of guaranteed payload on one worker - clients_per_worker = clients_count // workers_count - # Remainder of clients left to be distributed - remainder = clients_count - clients_per_worker * workers_count - - distribution = [clients_per_worker + 1 if i < remainder else clients_per_worker for i in range(workers_count)] - return distribution - - def start(self): - load_params = self.k6_instances[0].load_params - - parallel([k6.start for k6 in self.k6_instances]) - - wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5 - with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"): - time.sleep(wait_after_start_time) - - def stop(self): - for k6_instance in self.k6_instances: - k6_instance.stop() - - def get_results(self) -> dict: - results = {} - for k6_instance in self.k6_instances: - if k6_instance.load_params.k6_process_allocation_strategy is None: - raise RuntimeError("k6_process_allocation_strategy should not be none") - - result = k6_instance.get_results() - endpoint = urlparse(k6_instance.endpoints[0]).netloc or k6_instance.endpoints[0] - keys_map = { - K6ProcessAllocationStrategy.PER_LOAD_NODE: k6_instance.loader.ip, - K6ProcessAllocationStrategy.PER_ENDPOINT: endpoint, - } - key = keys_map[k6_instance.load_params.k6_process_allocation_strategy] - results[key] = result - - return results - - -class LocalRunner(RunnerBase): - cluster_state_controller: ClusterStateController - file_keeper: FileKeeper - user: User - - def __init__( - self, - cluster_state_controller: ClusterStateController, - file_keeper: FileKeeper, - nodes_under_load: list[ClusterNode], - user: User, - ) -> None: - self.cluster_state_controller = cluster_state_controller - self.file_keeper = file_keeper - self.loaders = [NodeLoader(node) for node in nodes_under_load] - self.nodes_under_load = nodes_under_load - self.user = user - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Preparation steps") - def prepare( - self, - load_params: LoadParams, - cluster_nodes: list[ClusterNode], - nodes_under_load: list[ClusterNode], - k6_dir: str, - ): - parallel(self.prepare_node, nodes_under_load, k6_dir, load_params) - - @retry(3, 5, expected_result=True) - def allow_user_to_login_in_system(self, cluster_node: ClusterNode): - shell = cluster_node.host.get_shell() - - result = None - try: - shell.exec(f"sudo chsh -s /bin/bash {STORAGE_USER_NAME}") - self.lock_passwd_on_node(cluster_node) - options = CommandOptions(check=False, extra_inspectors=[SuInspector(STORAGE_USER_NAME)]) - result = shell.exec("whoami", options) - finally: - if not result or result.return_code: - self.restore_passwd_on_node(cluster_node) - return False - - return True - - @reporter.step("Prepare node {cluster_node}") - def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams): - shell = cluster_node.host.get_shell() - - with reporter.step("Allow storage user to login into system"): - self.allow_user_to_login_in_system(cluster_node) - - with reporter.step("Update limits.conf"): - limits_path = "/etc/security/limits.conf" - self.file_keeper.add(cluster_node.storage_node, limits_path) - content = f"{STORAGE_USER_NAME} hard nofile 65536\n{STORAGE_USER_NAME} soft nofile 65536\n" - shell.exec(f"echo '{content}' | sudo tee {limits_path}") - - with reporter.step("Download K6"): - shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}") - 
shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}") - shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz --strip-components 2 -C {k6_dir}") - shell.exec(f"sudo chmod -R 777 {k6_dir}") - - with reporter.step("chmod 777 wallet related files on loader"): - shell.exec(f"sudo chmod -R 777 {self.user.wallet.config_path}") - shell.exec(f"sudo chmod -R 777 {self.user.wallet.path}") - - @reporter.step("Init k6 instances") - def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): - self.k6_instances = [] - futures = parallel( - self._init_k6_instance, - self.loaders, - load_params, - k6_dir, - ) - self.k6_instances = [future.result() for future in futures] - - def _init_k6_instance(self, loader: Loader, load_params: LoadParams, k6_dir: str): - shell = loader.get_shell() - with reporter.step(f"Init K6 instance on {loader.ip}"): - with reporter.step(f"Make working directory"): - shell.exec(f"sudo mkdir -p {load_params.working_dir}") - # If we chmod /home/ folder we can no longer ssh to the node - # !! IMPORTANT !! - if ( - load_params.working_dir - and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}" - and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/" - ): - shell.exec(f"sudo chmod -R 777 {load_params.working_dir}") - - return K6( - load_params, - ["localhost:8080"], - k6_dir, - shell, - loader, - self.user, - ) - - def start(self): - load_params = self.k6_instances[0].load_params - - self.cluster_state_controller.stop_services_of_type(S3Gate) - self.cluster_state_controller.stop_services_of_type(StorageNode) - - parallel([k6.start for k6 in self.k6_instances]) - - wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5 - with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"): - time.sleep(wait_after_start_time) - - @reporter.step("Restore passwd on {cluster_node}") - def restore_passwd_on_node(self, cluster_node: ClusterNode): - shell = cluster_node.host.get_shell() - shell.exec("sudo chattr -i /etc/passwd") - - @reporter.step("Lock passwd on {cluster_node}") - def lock_passwd_on_node(self, cluster_node: ClusterNode): - shell = cluster_node.host.get_shell() - shell.exec("sudo chattr +i /etc/passwd") - - def stop(self): - for k6_instance in self.k6_instances: - k6_instance.stop() - - self.cluster_state_controller.start_all_stopped_services() - - def get_results(self) -> dict: - results = {} - for k6_instance in self.k6_instances: - result = k6_instance.get_results() - results[k6_instance.loader.ip] = result - - parallel(self.restore_passwd_on_node, self.nodes_under_load) - - return results - - -class S3LocalRunner(LocalRunner): - endpoints: list[str] - k6_dir: str - - @reporter.step("Run preset on loaders") - def preset(self): - LocalRunner.preset(self) - with reporter.step(f"Resolve containers in preset"): - parallel(self._resolve_containers_in_preset, self.k6_instances) - - @reporter.step("Resolve containers in preset") - def _resolve_containers_in_preset(self, k6_instance: K6): - k6_instance.shell.exec( - f"sudo {self.k6_dir}/scenarios/preset/resolve_containers_in_preset.py --endpoint {k6_instance.endpoints[0]} --preset_file {k6_instance.load_params.preset.pregen_json}" - ) - - @reporter.step("Init k6 instances") - def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): - self.k6_instances = [] - futures = parallel( - self._init_k6_instance_, - self.loaders, - load_params, - endpoints, - k6_dir, - ) - 
self.k6_instances = [future.result() for future in futures] - - def _init_k6_instance_(self, loader: Loader, load_params: LoadParams, endpoints: list[str], k6_dir: str): - shell = loader.get_shell() - with reporter.step(f"Init K6 instance on {loader.ip} for endpoints {endpoints}"): - with reporter.step(f"Make working directory"): - shell.exec(f"sudo mkdir -p {load_params.working_dir}") - # If we chmod /home/ folder we can no longer ssh to the node - # !! IMPORTANT !! - if ( - load_params.working_dir - and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}" - and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/" - ): - shell.exec(f"sudo chmod -R 777 {load_params.working_dir}") - - return K6( - load_params, - self.endpoints, - k6_dir, - shell, - loader, - self.user, - ) - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Preparation steps") - def prepare( - self, - load_params: LoadParams, - cluster_nodes: list[ClusterNode], - nodes_under_load: list[ClusterNode], - k6_dir: str, - ): - self.k6_dir = k6_dir - parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, cluster_nodes) - - @reporter.step("Prepare node {cluster_node}") - def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, cluster_nodes: list[ClusterNode]): - LocalRunner.prepare_node(self, cluster_node, k6_dir, load_params) - self.endpoints = cluster_node.s3_gate.get_all_endpoints() - shell = cluster_node.host.get_shell() - - with reporter.step("Uninstall previous installation of aws cli"): - shell.exec(f"sudo rm -rf /usr/local/aws-cli") - shell.exec(f"sudo rm -rf /usr/local/bin/aws") - shell.exec(f"sudo rm -rf /usr/local/bin/aws_completer") - - with reporter.step("Install aws cli"): - shell.exec(f"sudo curl {load_params.awscli_url} -o {k6_dir}/awscliv2.zip") - shell.exec(f"sudo unzip -q {k6_dir}/awscliv2.zip -d {k6_dir}") - shell.exec(f"sudo {k6_dir}/aws/install") - - with reporter.step("Install requests python module"): - shell.exec(f"sudo apt-get -y install python3-pip") - shell.exec(f"sudo curl -so {k6_dir}/requests.tar.gz {load_params.requests_module_url}") - shell.exec(f"sudo python3 -m pip install -I {k6_dir}/requests.tar.gz") - - with reporter.step(f"Init s3 client on {cluster_node.host_ip}"): - configure_input = [ - InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=self.user.s3_credentials.access_key), - InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=self.user.s3_credentials.secret_key), - InteractiveInput(prompt_pattern=r".*", input=""), - InteractiveInput(prompt_pattern=r".*", input=""), - ] - shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input)) diff --git a/src/frostfs_testlib/plugins/__init__.py b/src/frostfs_testlib/plugins/__init__.py deleted file mode 100644 index 26b2441..0000000 --- a/src/frostfs_testlib/plugins/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -from importlib.metadata import entry_points -from typing import Any - - -def load_plugin(plugin_group: str, name: str) -> Any: - """Loads plugin using entry point specification. - - Args: - plugin_group: Name of plugin group that contains the plugin. - name: Name of the plugin in the group. - - Returns: - Plugin class if the plugin was found; otherwise returns None. - """ - plugins = entry_points(group=plugin_group) - if name not in plugins.names: - return None - plugin = plugins[name] - return plugin.load() - - -def load_all(group: str) -> Any: - """Loads all plugins using entry point specification. 
- - Args: - group: Name of plugin group. - - Returns: - Classes from specified group. - """ - plugins = entry_points(group=group) - return [plugin.load() for plugin in plugins] diff --git a/src/frostfs_testlib/processes/remote_process.py b/src/frostfs_testlib/processes/remote_process.py deleted file mode 100644 index 071675a..0000000 --- a/src/frostfs_testlib/processes/remote_process.py +++ /dev/null @@ -1,280 +0,0 @@ -from __future__ import annotations - -import os -import uuid -from typing import Optional - -from tenacity import retry -from tenacity.stop import stop_after_attempt -from tenacity.wait import wait_fixed - -from frostfs_testlib import reporter -from frostfs_testlib.shell import Shell -from frostfs_testlib.shell.command_inspectors import SuInspector -from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions - - -class RemoteProcess: - def __init__( - self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector], proc_id: str - ): - self.process_dir = process_dir - self.cmd = cmd - self.stdout_last_line_number = 0 - self.stderr_last_line_number = 0 - self.pid: Optional[str] = None - self.proc_rc: Optional[int] = None - self.proc_start_time: Optional[int] = None - self.proc_end_time: Optional[int] = None - self.saved_stdout: Optional[str] = None - self.saved_stderr: Optional[str] = None - self.shell = shell - self.proc_id: str = proc_id - self.cmd_inspectors: list[CommandInspector] = [cmd_inspector] if cmd_inspector else [] - - @classmethod - @reporter.step("Create remote process") - def create( - cls, - command: str, - shell: Shell, - working_dir: str = "/tmp", - user: Optional[str] = None, - proc_id: Optional[str] = None, - ) -> RemoteProcess: - """ - Create a process on a remote host. - - Created dir for process with following files: - command.sh: script to execute - pid: contains process id - rc: contains script return code - stderr: contains script errors - stdout: contains script output - user: user on behalf whom command will be executed - proc_id: process string identificator - - Args: - shell: Shell instance - command: command to be run on a remote host - working_dir: working directory for the process - - Returns: - RemoteProcess instance for further examination - """ - if proc_id is None: - proc_id = f"{uuid.uuid4()}" - - cmd_inspector = SuInspector(user) if user else None - remote_process = cls( - cmd=command, - process_dir=os.path.join(working_dir, f"proc_{proc_id}"), - shell=shell, - cmd_inspector=cmd_inspector, - proc_id=proc_id, - ) - - return remote_process - - @reporter.step("Start remote process") - def start(self): - """ - Starts a process on a remote host. - """ - - self._create_process_dir() - self._generate_command_script() - self._start_process() - self.pid = self._get_pid() - - @reporter.step("Get process stdout") - def stdout(self, full: bool = False) -> str: - """ - Method to get process stdout, either fresh info or full. - - Args: - full: returns full stdout that we have to this moment - - Returns: - Fresh stdout. By means of stdout_last_line_number only new stdout lines are returned. 
- If process is finished (proc_rc is not None) saved stdout is returned - """ - if self.saved_stdout is not None: - cur_stdout = self.saved_stdout - else: - terminal = self.shell.exec( - f"cat {self.process_dir}/stdout", - options=CommandOptions(no_log=True, extra_inspectors=self.cmd_inspectors), - ) - if self.proc_rc is not None: - self.saved_stdout = terminal.stdout - cur_stdout = terminal.stdout - - if full: - return cur_stdout - whole_stdout = cur_stdout.split("\n") - if len(whole_stdout) > self.stdout_last_line_number: - resulted_stdout = "\n".join(whole_stdout[self.stdout_last_line_number :]) - self.stdout_last_line_number = len(whole_stdout) - return resulted_stdout - return "" - - @reporter.step("Get process stderr") - def stderr(self, full: bool = False) -> str: - """ - Method to get process stderr, either fresh info or full. - - Args: - full: returns full stderr that we have to this moment - - Returns: - Fresh stderr. By means of stderr_last_line_number only new stderr lines are returned. - If process is finished (proc_rc is not None) saved stderr is returned - """ - if self.saved_stderr is not None: - cur_stderr = self.saved_stderr - else: - terminal = self.shell.exec( - f"cat {self.process_dir}/stderr", - options=CommandOptions(no_log=True, extra_inspectors=self.cmd_inspectors), - ) - if self.proc_rc is not None: - self.saved_stderr = terminal.stdout - cur_stderr = terminal.stdout - if full: - return cur_stderr - whole_stderr = cur_stderr.split("\n") - if len(whole_stderr) > self.stderr_last_line_number: - resulted_stderr = "\n".join(whole_stderr[self.stderr_last_line_number :]) - self.stderr_last_line_number = len(whole_stderr) - return resulted_stderr - return "" - - @reporter.step("Get process rc") - def rc(self) -> Optional[int]: - if self.proc_rc is not None: - return self.proc_rc - - result = self._cat_proc_file("rc") - if not result: - return None - - self.proc_rc = int(result) - return self.proc_rc - - @reporter.step("Get process start time") - def start_time(self) -> Optional[int]: - if self.proc_start_time is not None: - return self.proc_start_time - - result = self._cat_proc_file("start_time") - if not result: - return None - - self.proc_start_time = int(result) - return self.proc_start_time - - @reporter.step("Get process end time") - def end_time(self) -> Optional[int]: - if self.proc_end_time is not None: - return self.proc_end_time - - result = self._cat_proc_file("end_time") - if not result: - return None - - self.proc_end_time = int(result) - return self.proc_end_time - - def _cat_proc_file(self, file: str) -> Optional[str]: - terminal = self.shell.exec( - f"cat {self.process_dir}/{file}", - CommandOptions(check=False, extra_inspectors=self.cmd_inspectors, no_log=True), - ) - if "No such file or directory" in terminal.stderr: - return None - elif terminal.return_code != 0: - raise AssertionError(f"cat process {file} was not successful: {terminal.stderr}") - - return terminal.stdout - - @reporter.step("Check if process is running") - def running(self) -> bool: - return self.rc() is None - - @reporter.step("Send signal to process") - def send_signal(self, signal: int) -> None: - kill_res = self.shell.exec( - f"kill -{signal} {self.pid}", - CommandOptions(check=False, extra_inspectors=self.cmd_inspectors), - ) - if "No such process" in kill_res.stderr: - return - if kill_res.return_code: - raise AssertionError(f"Signal {signal} not sent. 
Return code of kill: {kill_res.return_code}") - - @reporter.step("Stop process") - def stop(self) -> None: - self.send_signal(15) - - @reporter.step("Kill process") - def kill(self) -> None: - self.send_signal(9) - - @reporter.step("Clear process directory") - def clear(self) -> None: - if self.process_dir == "/": - raise AssertionError(f"Invalid path to delete: {self.process_dir}") - self.shell.exec(f"rm -rf {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) - - @reporter.step("Start remote process") - def _start_process(self) -> None: - self.shell.exec( - f"nohup {self.process_dir}/command.sh {self.process_dir}/stdout " - f"2>{self.process_dir}/stderr &", - CommandOptions(extra_inspectors=self.cmd_inspectors), - ) - - @reporter.step("Create process directory") - def _create_process_dir(self) -> None: - self.shell.exec(f"mkdir -p {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) - self.shell.exec(f"chmod 777 {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) - terminal = self.shell.exec(f"realpath {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) - self.process_dir = terminal.stdout.strip() - - @reporter.step("Get pid") - @retry(wait=wait_fixed(10), stop=stop_after_attempt(5), reraise=True) - def _get_pid(self) -> str: - terminal = self.shell.exec(f"cat {self.process_dir}/pid", CommandOptions(extra_inspectors=self.cmd_inspectors)) - assert terminal.stdout, f"invalid pid: {terminal.stdout}" - return terminal.stdout.strip() - - @reporter.step("Generate command script") - def _generate_command_script(self) -> None: - command = self.cmd.replace('"', '\\"').replace("\\", "\\\\") - script = ( - f"#!/bin/bash\n" - f"cd {self.process_dir}\n" - f"date +%s > {self.process_dir}/start_time\n" - f"{command} &\n" - f"pid=\$!\n" - f"cd {self.process_dir}\n" - f"echo \$pid > {self.process_dir}/pid\n" - f"wait \$pid\n" - f"echo $? > {self.process_dir}/rc\n" - f"date +%s > {self.process_dir}/end_time\n" - ) - - self.shell.exec( - f'echo "{script}" > {self.process_dir}/command.sh', - CommandOptions(extra_inspectors=self.cmd_inspectors), - ) - self.shell.exec( - f"cat {self.process_dir}/command.sh", - CommandOptions(extra_inspectors=self.cmd_inspectors), - ) - self.shell.exec( - f"chmod +x {self.process_dir}/command.sh", - CommandOptions(extra_inspectors=self.cmd_inspectors), - ) diff --git a/src/frostfs_testlib/reporter/__init__.py b/src/frostfs_testlib/reporter/__init__.py deleted file mode 100644 index 848c175..0000000 --- a/src/frostfs_testlib/reporter/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -from typing import Any - -from frostfs_testlib.reporter.allure_handler import AllureHandler -from frostfs_testlib.reporter.interfaces import ReporterHandler -from frostfs_testlib.reporter.reporter import Reporter -from frostfs_testlib.reporter.steps_logger import StepsLogger - -__reporter = Reporter() - - -def get_reporter() -> Reporter: - """Returns reporter that the library should use for storing artifacts. - - Reporter is a singleton instance that can be configured with multiple handlers that store - artifacts in various systems. Most common use case is to use single handler. - - Returns: - Singleton reporter instance. 
- """ - return __reporter - - -def step(title: str): - return __reporter.step(title) - - -def attach(content: Any, file_name: str): - return __reporter.attach(content, file_name) diff --git a/src/frostfs_testlib/reporter/allure_handler.py b/src/frostfs_testlib/reporter/allure_handler.py deleted file mode 100644 index ef63638..0000000 --- a/src/frostfs_testlib/reporter/allure_handler.py +++ /dev/null @@ -1,42 +0,0 @@ -import os -from contextlib import AbstractContextManager, ContextDecorator -from textwrap import shorten -from typing import Any, Callable - -import allure -from allure import attachment_type - -from frostfs_testlib.reporter.interfaces import ReporterHandler - - -class AllureHandler(ReporterHandler): - """Handler that stores test artifacts in Allure report.""" - - def step(self, name: str) -> AbstractContextManager | ContextDecorator: - name = shorten(name, width=140, placeholder="...") - return allure.step(name) - - def step_decorator(self, name: str) -> Callable: - return allure.step(name) - - def attach(self, body: Any, file_name: str) -> None: - attachment_name, extension = os.path.splitext(file_name) - if extension.startswith("."): - extension = extension[1:] - attachment_type = self._resolve_attachment_type(extension) - - if os.path.exists(body): - allure.attach.file(body, file_name, attachment_type, extension) - else: - allure.attach(body, attachment_name, attachment_type, extension) - - def _resolve_attachment_type(self, extension: str) -> attachment_type: - """Try to find matching Allure attachment type by extension. - - If no match was found, default to TXT format. - """ - extension = extension.lower() - return next( - (allure_type for allure_type in attachment_type if allure_type.extension == extension), - attachment_type.TEXT, - ) diff --git a/src/frostfs_testlib/reporter/interfaces.py b/src/frostfs_testlib/reporter/interfaces.py deleted file mode 100644 index 4e24feb..0000000 --- a/src/frostfs_testlib/reporter/interfaces.py +++ /dev/null @@ -1,39 +0,0 @@ -from abc import ABC, abstractmethod -from contextlib import AbstractContextManager, ContextDecorator -from typing import Any, Callable - - -class ReporterHandler(ABC): - """Interface of handler that stores test artifacts in some reporting tool.""" - - @abstractmethod - def step(self, name: str) -> AbstractContextManager | ContextDecorator: - """Register a new step in test execution. - - Args: - name: Name of the step. - - Returns: - Step context. - """ - - @abstractmethod - def step_decorator(self, name: str) -> Callable: - """A step decorator from reporter. - - Args: - name: Name of the step. - - Returns: - decorator for the step - """ - - @abstractmethod - def attach(self, content: Any, file_name: str) -> None: - """Attach specified content with given file name to the test report. - - Args: - content: Content to attach. If content value is not a string, it will be - converted to a string. - file_name: File name of attachment. 
- """ diff --git a/src/frostfs_testlib/reporter/reporter.py b/src/frostfs_testlib/reporter/reporter.py deleted file mode 100644 index 2d1a43e..0000000 --- a/src/frostfs_testlib/reporter/reporter.py +++ /dev/null @@ -1,135 +0,0 @@ -from contextlib import AbstractContextManager, contextmanager -from functools import wraps -from types import TracebackType -from typing import Any, Callable, Optional - -from frostfs_testlib.plugins import load_plugin -from frostfs_testlib.reporter.interfaces import ReporterHandler -from frostfs_testlib.utils.func_utils import format_by_args - - -@contextmanager -def _empty_step(): - yield - - -class Reporter: - """Root reporter that sends artifacts to handlers.""" - - handlers: list[ReporterHandler] - - def __init__(self) -> None: - super().__init__() - self.handlers = [] - - def register_handler(self, handler: ReporterHandler) -> None: - """Register a new handler for the reporter. - - Args: - handler: Handler instance to add to the reporter. - """ - self.handlers.append(handler) - - def configure(self, config: dict[str, Any]) -> None: - """Configure handlers in the reporter from specified config. - - All existing handlers will be removed from the reporter. - - Args: - config: Dictionary with reporter configuration. - """ - # Reset current configuration - self.handlers = [] - - # Setup handlers from the specified config - handler_configs = config.get("handlers", []) - for handler_config in handler_configs: - handler_class = load_plugin("frostfs.testlib.reporter", handler_config["plugin_name"]) - self.register_handler(handler_class()) - - def step_deco(self, name: str) -> Callable: - """Register a new step in test execution in a decorator fashion. - - To note: the actual decoration with handlers is happening during target function call time. - - Args: - name: Name of the step. - - Returns: - decorated function - """ - - def deco(func): - @wraps(func) - def wrapper(*a, **kw): - resulting_func = func - for handler in self.handlers: - parsed_name = format_by_args(func, name, *a, **kw) - decorator = handler.step_decorator(parsed_name) - resulting_func = decorator(resulting_func) - - return resulting_func(*a, **kw) - - return wrapper - - return deco - - def step(self, name: str) -> AbstractContextManager: - """Register a new step in test execution. - - Args: - name: Name of the step. - - Returns: - Step context. - """ - step_contexts = [handler.step(name) for handler in self.handlers] - if not step_contexts: - step_contexts = [_empty_step()] - decorated_wrapper = self.step_deco(name) - return AggregateContextManager(step_contexts, decorated_wrapper) - - def attach(self, content: Any, file_name: str) -> None: - """Attach specified content with given file name to the test report. - - Args: - content: Content to attach. If content value is not a string, it will be - converted to a string. - file_name: File name of attachment. 
- """ - for handler in self.handlers: - handler.attach(content, file_name) - - -class AggregateContextManager(AbstractContextManager): - """Aggregates multiple context managers in a single context.""" - - contexts: list[AbstractContextManager] - - def __init__(self, contexts: list[AbstractContextManager], decorated_wrapper: Callable) -> None: - super().__init__() - self.contexts = contexts - self.wrapper = decorated_wrapper - - def __enter__(self): - for context in self.contexts: - context.__enter__() - return self - - def __exit__( - self, - exc_type: Optional[type[BaseException]], - exc_value: Optional[BaseException], - traceback: Optional[TracebackType], - ) -> Optional[bool]: - suppress_decisions = [] - for context in self.contexts: - suppress_decision = context.__exit__(exc_type, exc_value, traceback) - suppress_decisions.append(suppress_decision) - - # If all context agreed to suppress exception, then suppress it; - # otherwise return None to reraise - return True if all(suppress_decisions) else None - - def __call__(self, *args: Any, **kwds: Any) -> Any: - return self.wrapper(*args, **kwds) diff --git a/src/frostfs_testlib/reporter/steps_logger.py b/src/frostfs_testlib/reporter/steps_logger.py deleted file mode 100644 index 4cdfb3d..0000000 --- a/src/frostfs_testlib/reporter/steps_logger.py +++ /dev/null @@ -1,56 +0,0 @@ -import logging -import threading -from contextlib import AbstractContextManager, ContextDecorator -from functools import wraps -from types import TracebackType -from typing import Any, Callable - -from frostfs_testlib.reporter.interfaces import ReporterHandler - - -class StepsLogger(ReporterHandler): - """Handler that prints steps to log.""" - - def step(self, name: str) -> AbstractContextManager | ContextDecorator: - return StepLoggerContext(name) - - def step_decorator(self, name: str) -> Callable: - return StepLoggerContext(name) - - def attach(self, body: Any, file_name: str) -> None: - pass - - -class StepLoggerContext(AbstractContextManager): - INDENT = {} - - def __init__(self, title: str): - self.title = title - self.logger = logging.getLogger("NeoLogger") - self.thread = threading.get_ident() - if self.thread not in StepLoggerContext.INDENT: - StepLoggerContext.INDENT[self.thread] = 1 - - def __enter__(self) -> Any: - indent = ">" * StepLoggerContext.INDENT[self.thread] - self.logger.info(f"[{self.thread}] {indent} {self.title}") - StepLoggerContext.INDENT[self.thread] += 1 - - def __exit__( - self, - __exc_type: type[BaseException] | None, - __exc_value: BaseException | None, - __traceback: TracebackType | None, - ) -> bool | None: - - StepLoggerContext.INDENT[self.thread] -= 1 - indent = "<" * StepLoggerContext.INDENT[self.thread] - self.logger.info(f"[{self.thread}] {indent} {self.title}") - - def __call__(self, func): - @wraps(func) - def impl(*a, **kw): - with self: - return func(*a, **kw) - - return impl diff --git a/src/frostfs_testlib/resources/__init__.py b/src/frostfs_testlib/resources/__init__.py deleted file mode 100644 index 71bb053..0000000 --- a/src/frostfs_testlib/resources/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from frostfs_testlib.resources import common diff --git a/src/frostfs_testlib/resources/cli.py b/src/frostfs_testlib/resources/cli.py deleted file mode 100644 index 06a9832..0000000 --- a/src/frostfs_testlib/resources/cli.py +++ /dev/null @@ -1,12 +0,0 @@ -# Paths to CLI executables on machine that runs tests -import os - -NEOGO_EXECUTABLE = os.getenv("FROSTFS_EXECUTABLE", "neo-go") -FROSTFS_CLI_EXEC = 
os.getenv("FROSTFS_CLI_EXEC", "frostfs-cli") -FROSTFS_AUTHMATE_EXEC = os.getenv("FROSTFS_AUTHMATE_EXEC", "frostfs-s3-authmate") -FROSTFS_ADM_EXEC = os.getenv("FROSTFS_ADM_EXEC", "frostfs-adm") - -# Config for frostfs-adm utility. Optional if tests are running against devenv -FROSTFS_ADM_CONFIG_PATH = os.getenv("FROSTFS_ADM_CONFIG_PATH") - -CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", "100s") diff --git a/src/frostfs_testlib/resources/common.py b/src/frostfs_testlib/resources/common.py deleted file mode 100644 index 53bcfaa..0000000 --- a/src/frostfs_testlib/resources/common.py +++ /dev/null @@ -1,56 +0,0 @@ -import os - -import yaml - -CONTAINER_WAIT_INTERVAL = "1m" - -SIMPLE_OBJECT_SIZE = os.getenv("SIMPLE_OBJECT_SIZE", "1000") -COMPLEX_OBJECT_CHUNKS_COUNT = os.getenv("COMPLEX_OBJECT_CHUNKS_COUNT", "3") -COMPLEX_OBJECT_TAIL_SIZE = os.getenv("COMPLEX_OBJECT_TAIL_SIZE", "1000") - -SERVICE_MAX_STARTUP_TIME = os.getenv("SERVICE_MAX_STARTUP_TIME", "5m") - -STORAGE_USER_NAME = "frostfs-storage" - -MORPH_TIMEOUT = os.getenv("MORPH_BLOCK_TIME", "8s") -MORPH_BLOCK_TIME = os.getenv("MORPH_BLOCK_TIME", "8s") -FROSTFS_CONTRACT_CACHE_TIMEOUT = os.getenv("FROSTFS_CONTRACT_CACHE_TIMEOUT", "30s") - -# Time interval that allows a GC pass on storage node (this includes GC sleep interval -# of 1min plus 15 seconds for GC pass itself) -STORAGE_GC_TIME = os.getenv("STORAGE_GC_TIME", "75s") - -GAS_HASH = os.getenv("GAS_HASH", "0xd2a4cff31913016155e38e474a2c06d08be276cf") -FROSTFS_CONTRACT = os.getenv("FROSTFS_IR_CONTRACTS_FROSTFS") - -ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir") - -# Password of wallet owned by user on behalf of whom we are running tests -# Default wallet password is empty -DEFAULT_WALLET_PASS = os.getenv("WALLET_PASS", "") - -# Artificial delay that we add after object deletion and container creation -# Delay is added because sometimes immediately after deletion object still appears -# to be existing (probably because tombstone object takes some time to replicate) -# TODO: remove this wait -S3_SYNC_WAIT_TIME = 5 - -# Generate wallet config -# TODO: we should move all info about wallet configs to fixtures -DEFAULT_WALLET_CONFIG = os.path.join(os.getcwd(), "wallet_config.yml") -with open(DEFAULT_WALLET_CONFIG, "w") as file: - yaml.dump({"password": DEFAULT_WALLET_PASS}, file) - -# Number of attempts that S3 clients will attempt per each request (1 means single attempt -# without any retries) -MAX_REQUEST_ATTEMPTS = 5 -RETRY_MODE = "standard" -CREDENTIALS_CREATE_TIMEOUT = "1m" - - -HOSTING_CONFIG_FILE = os.getenv( - "HOSTING_CONFIG_FILE", os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..", ".devenv.hosting.yaml")) -) - -MORE_LOG = os.getenv("MORE_LOG", "1") -EXPIRATION_EPOCH_ATTRIBUTE = "__SYSTEM__EXPIRATION_EPOCH" diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py deleted file mode 100644 index 15e2977..0000000 --- a/src/frostfs_testlib/resources/error_patterns.py +++ /dev/null @@ -1,38 +0,0 @@ -# Regex patterns of status codes of Container service -CONTAINER_NOT_FOUND = "code = 3072.*message = container not found" -SUBJECT_NOT_FOUND = "code = 1024.*message =.*chain/client.*subject not found.*" - -# Regex patterns of status codes of Object service -MALFORMED_REQUEST = "code = 1024.*message = malformed request" -OBJECT_ACCESS_DENIED = "code = 2048.*message = access to object operation denied" -OBJECT_NOT_FOUND = "code = 2049.*message = object not found" -OBJECT_ALREADY_REMOVED = "code = 
2052.*message = object already removed" -SESSION_NOT_FOUND = "code = 4096.*message = session token not found" -OUT_OF_RANGE = "code = 2053.*message = out of range" -EXPIRED_SESSION_TOKEN = "code = 4097.*message = expired session token" -ADD_CHAIN_ERROR = "code = 5120 message = apemanager access denied" -# TODO: Change to codes with message -# OBJECT_IS_LOCKED = "code = 2050.*message = object is locked" -# LOCK_NON_REGULAR_OBJECT = "code = 2051.*message = ..." will be available once 2092 is fixed -OBJECT_IS_LOCKED = "code = 2050" -LOCK_NON_REGULAR_OBJECT = "code = 2051" - -LIFETIME_REQUIRED = "either expiration epoch of a lifetime is required" -LOCK_OBJECT_REMOVAL = "lock object removal" -LOCK_OBJECT_EXPIRATION = "lock object expiration: {expiration_epoch}; current: {current_epoch}" -INVALID_RANGE_ZERO_LENGTH = "invalid '{range}' range: zero length" -INVALID_RANGE_OVERFLOW = "invalid '{range}' range: uint64 overflow" -INVALID_OFFSET_SPECIFIER = "invalid '{range}' range offset specifier" -INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier" - -S3_BUCKET_DOES_NOT_ALLOW_ACL = "The bucket does not allow ACLs" -S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not validate against our published schema." - -RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied" -# Errors from node missing reasons if request was forwarded. Commenting for now -# RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied" -RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request" -NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound" -# Errors from node missing reasons if request was forwarded. 
Commenting for now -# NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound" -NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request" diff --git a/src/frostfs_testlib/resources/load_params.py b/src/frostfs_testlib/resources/load_params.py deleted file mode 100644 index ad3ed1c..0000000 --- a/src/frostfs_testlib/resources/load_params.py +++ /dev/null @@ -1,35 +0,0 @@ -import os - -# Background load node parameters -LOAD_NODES = os.getenv("LOAD_NODES", "").split() -# Must hardcode for now -LOAD_NODE_SSH_USER = os.getenv("LOAD_NODE_SSH_USER", "service") -LOAD_NODE_SSH_PASSWORD = os.getenv("LOAD_NODE_SSH_PASSWORD") -LOAD_NODE_SSH_PRIVATE_KEY_PATH = os.getenv("LOAD_NODE_SSH_PRIVATE_KEY_PATH") -LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE = os.getenv("LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE") -BACKGROUND_WRITERS_COUNT = os.getenv("BACKGROUND_WRITERS_COUNT", 0) -BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 0) -BACKGROUND_DELETERS_COUNT = os.getenv("BACKGROUND_DELETERS_COUNT", 0) -BACKGROUND_VERIFIERS_COUNT = os.getenv("BACKGROUND_VERIFIERS_COUNT", 0) -BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 1800) -BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE = os.getenv("BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE", 32) -BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME = float(os.getenv("BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME", 0.8)) -BACKGROUND_LOAD_SETUP_TIMEOUT = os.getenv("BACKGROUND_LOAD_SETUP_TIMEOUT", "5s") - -# This will decrease load params for some weak environments -BACKGROUND_LOAD_VUS_COUNT_DIVISOR = os.getenv("BACKGROUND_LOAD_VUS_COUNT_DIVISOR", 1) - -# Wait for 1 hour for xk6 verify scenario by default (in practice means like "unlimited" time) -BACKGROUND_LOAD_MAX_VERIFY_TIME = os.getenv("BACKGROUND_LOAD_VERIFY_MAX_TIME", 3600) -BACKGROUND_LOAD_CONTAINER_PLACEMENT_POLICY = os.getenv( - "BACKGROUND_LOAD_CONTAINER_PLACEMENT_POLICY", "REP 2 IN X CBF 2 SELECT 2 FROM * AS X" -) -BACKGROUND_LOAD_S3_LOCATION = os.getenv("BACKGROUND_LOAD_S3_LOCATION", "node-off") -PRESET_CONTAINERS_COUNT = os.getenv("CONTAINERS_COUNT", "40") -PRESET_CONTAINER_CREATION_RETRY_COUNT = os.getenv("CONTAINER_CREATION_RETRY_COUNT", "20") -# TODO: At lease one object is required due to bug in xk6 (buckets with no objects produce millions exceptions in read) -PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "1") -K6_DIRECTORY = os.getenv("K6_DIRECTORY", "/etc/k6") -K6_TEARDOWN_PERIOD = os.getenv("K6_TEARDOWN_PERIOD", "30") -K6_STOP_SIGNAL_TIMEOUT = int(os.getenv("K6_STOP_SIGNAL_TIMEOUT", 300)) -LOAD_CONFIG_YAML_PATH = os.getenv("LOAD_CONFIG_YAML_PATH", "load_config_yaml_file.yml") diff --git a/src/frostfs_testlib/resources/optionals.py b/src/frostfs_testlib/resources/optionals.py deleted file mode 100644 index 6caf158..0000000 --- a/src/frostfs_testlib/resources/optionals.py +++ /dev/null @@ -1,25 +0,0 @@ -import os - - -def str_to_bool(input: str) -> bool: - return input in ["true", "True", "1"] - - -# Override these optional params to not comment/modify code during local development. Use with caution. -# Node under test. Set this to occupy exact node. -OPTIONAL_NODE_UNDER_TEST = os.getenv("OPTIONAL_NODE_UNDER_TEST") - -# Node under load. Set this to target load on exact node. -OPTIONAL_NODE_UNDER_LOAD = os.getenv("OPTIONAL_NODE_UNDER_LOAD") - -# Set this to True to disable failover commands. I.E. node which supposed to be stopped will not be actually stopped. 
-OPTIONAL_FAILOVER_ENABLED = str_to_bool(os.getenv("OPTIONAL_FAILOVER_ENABLED", "true")) - -# Set this to True to disable background load. I.E. node which supposed to be stopped will not be actually stopped. -OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool(os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true")) - -# Set this to False for disable autouse fixture like node healthcheck during developing time. -OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool(os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true")) - -# Use cache for fixtures with @cachec_fixture decorator -OPTIONAL_CACHE_FIXTURES = str_to_bool(os.getenv("OPTIONAL_CACHE_FIXTURES", "false")) diff --git a/src/frostfs_testlib/resources/s3_acl_grants.py b/src/frostfs_testlib/resources/s3_acl_grants.py deleted file mode 100644 index a716bc5..0000000 --- a/src/frostfs_testlib/resources/s3_acl_grants.py +++ /dev/null @@ -1,9 +0,0 @@ -ALL_USERS_GROUP_URI = "http://acs.amazonaws.com/groups/global/AllUsers" -ALL_USERS_GROUP_WRITE_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "WRITE"} -ALL_USERS_GROUP_READ_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "READ"} -CANONICAL_USER_FULL_CONTROL_GRANT = {"Grantee": {"Type": "CanonicalUser"}, "Permission": "FULL_CONTROL"} - -# https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl -PRIVATE_GRANTS = [] -PUBLIC_READ_GRANTS = [ALL_USERS_GROUP_READ_GRANT] -PUBLIC_READ_WRITE_GRANTS = [ALL_USERS_GROUP_WRITE_GRANT, ALL_USERS_GROUP_READ_GRANT] diff --git a/src/frostfs_testlib/resources/wellknown_acl.py b/src/frostfs_testlib/resources/wellknown_acl.py deleted file mode 100644 index fe561b3..0000000 --- a/src/frostfs_testlib/resources/wellknown_acl.py +++ /dev/null @@ -1,11 +0,0 @@ -# ACLs with final flag -PUBLIC_ACL_F = "1FBFBFFF" -PRIVATE_ACL_F = "1C8C8CCC" -READONLY_ACL_F = "1FBF8CFF" - -# ACLs without final flag set -PUBLIC_ACL = "0FBFBFFF" -INACCESSIBLE_ACL = "40000000" -STICKY_BIT_PUB_ACL = "3FFFFFFF" - -EACL_PUBLIC_READ_WRITE = "eacl-public-read-write" diff --git a/src/frostfs_testlib/shell/__init__.py b/src/frostfs_testlib/shell/__init__.py deleted file mode 100644 index 980d119..0000000 --- a/src/frostfs_testlib/shell/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from frostfs_testlib.shell.interfaces import CommandOptions, CommandResult, InteractiveInput, Shell -from frostfs_testlib.shell.local_shell import LocalShell -from frostfs_testlib.shell.ssh_shell import SshConnectionProvider, SSHShell diff --git a/src/frostfs_testlib/shell/command_inspectors.py b/src/frostfs_testlib/shell/command_inspectors.py deleted file mode 100644 index 8fe2f34..0000000 --- a/src/frostfs_testlib/shell/command_inspectors.py +++ /dev/null @@ -1,29 +0,0 @@ -from frostfs_testlib.shell.interfaces import CommandInspector - - -class SudoInspector(CommandInspector): - """Prepends command with sudo. - - If command is already prepended with sudo, then has no effect. - """ - - def inspect(self, original_command: str, command: str) -> str: - if not command.startswith("sudo"): - return f"sudo {command}" - return command - - -class SuInspector(CommandInspector): - """Allows to run command as another user via sudo su call - - If command is already prepended with sudo su, then has no effect. 
- """ - - def __init__(self, user: str) -> None: - self.user = user - - def inspect(self, original_command: str, command: str) -> str: - if not original_command.startswith("sudo su"): - cmd = original_command.replace('"', '\\"').replace("\$", "\\\\\\$") - return f'sudo su - {self.user} -c "{cmd}"' - return original_command diff --git a/src/frostfs_testlib/shell/interfaces.py b/src/frostfs_testlib/shell/interfaces.py deleted file mode 100644 index a8d3325..0000000 --- a/src/frostfs_testlib/shell/interfaces.py +++ /dev/null @@ -1,113 +0,0 @@ -from abc import ABC, abstractmethod -from dataclasses import dataclass -from typing import Optional - -from frostfs_testlib.defaults import Options - - -@dataclass -class InteractiveInput: - """Interactive input for a shell command. - - Attributes: - prompt_pattern: Regular expression that defines expected prompt from the command. - input: User input that should be supplied to the command in response to the prompt. - """ - - prompt_pattern: str - input: str - - -class CommandInspector(ABC): - """Interface of inspector that processes command text before execution.""" - - @abstractmethod - def inspect(self, original_command: str, command: str) -> str: - """Transforms command text and returns modified command. - - Args: - command: Command to transform with this inspector. - original_command: Untransformed command to transform with this inspector. Depending on type of the inspector it might be required to modify original command - - Returns: - Transformed command text. - """ - - -@dataclass -class CommandOptions: - """Options that control command execution. - - Attributes: - interactive_inputs: User inputs that should be interactively supplied to - the command during execution. - close_stdin: Controls whether stdin stream should be closed after feeding interactive - inputs or after requesting non-interactive command. If shell implementation does not - support this functionality, it should ignore this flag without raising an error. - timeout: Timeout for command execution (in seconds). - check: Controls whether to check return code of the command. Set to False to - ignore non-zero return codes. - no_log: Do not print output to logger if True. - extra_inspectors: Exctra command inspectors to process command - """ - - interactive_inputs: Optional[list[InteractiveInput]] = None - close_stdin: bool = False - timeout: Optional[int] = None - check: bool = True - no_log: bool = False - extra_inspectors: Optional[list[CommandInspector]] = None - - def __post_init__(self): - if self.timeout is None: - self.timeout = Options.get_default_shell_timeout() - - -@dataclass -class SshCredentials: - """Represents ssh credentials. - - Attributes: - ssh_login: ssh login. - ssh_password: ssh password as plain text (unsecure, for local setup only). - ssh_key_path: path to a ssh key file. - ssh_key_passphrase: passphrase to ssh key file. - """ - - ssh_login: str - ssh_password: Optional[str] = None - ssh_key_path: Optional[str] = None - ssh_key_passphrase: Optional[str] = None - - -@dataclass -class CommandResult: - """Represents a result of a command executed via shell. - - Attributes: - stdout: Complete content of stdout stream. - stderr: Complete content of stderr stream. - return_code: Return code (or exit code) of the command's process. 
- """ - - stdout: str - stderr: str - return_code: int - - -class Shell(ABC): - """Interface of a command shell on some system (local or remote).""" - - @abstractmethod - def exec(self, command: str, options: Optional[CommandOptions] = None) -> CommandResult: - """Executes specified command on this shell. - - To execute interactive command, user inputs should be specified in *options*. - - Args: - command: Command to execute on the shell. - options: Options that control command execution. - - Returns: - Command's result. - """ diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py deleted file mode 100644 index c0f3b06..0000000 --- a/src/frostfs_testlib/shell/local_shell.py +++ /dev/null @@ -1,146 +0,0 @@ -import logging -import subprocess -import tempfile -from contextlib import nullcontext -from datetime import datetime -from typing import IO, Optional - -import pexpect - -from frostfs_testlib import reporter -from frostfs_testlib.resources.common import MORE_LOG -from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell - -logger = logging.getLogger("frostfs.testlib.shell") -step_context = reporter.step if MORE_LOG == "1" else nullcontext - - -class LocalShell(Shell): - """Implements command shell on a local machine.""" - - def __init__(self, command_inspectors: Optional[list[CommandInspector]] = None) -> None: - super().__init__() - self.command_inspectors = command_inspectors or [] - - def exec(self, command: str, options: Optional[CommandOptions] = None) -> CommandResult: - # If no options were provided, use default options - options = options or CommandOptions() - - original_command = command - extra_inspectors = options.extra_inspectors if options.extra_inspectors else [] - for inspector in [*self.command_inspectors, *extra_inspectors]: - command = inspector.inspect(original_command, command) - - with step_context(f"Executing command: {command}"): - if options.interactive_inputs: - return self._exec_interactive(command, options) - return self._exec_non_interactive(command, options) - - def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult: - start_time = datetime.utcnow() - log_file = tempfile.TemporaryFile() # File is reliable cross-platform way to capture output - - try: - command_process = pexpect.spawn(command, timeout=options.timeout, use_poll=True) - except (pexpect.ExceptionPexpect, OSError) as exc: - raise RuntimeError(f"Command: {command}") from exc - - command_process.delaybeforesend = 1 - command_process.logfile_read = log_file - - try: - for interactive_input in options.interactive_inputs: - command_process.expect(interactive_input.prompt_pattern) - command_process.sendline(interactive_input.input) - except (pexpect.ExceptionPexpect, OSError) as exc: - if options.check: - raise RuntimeError(f"Command: {command}") from exc - finally: - result = self._get_pexpect_process_result(command_process) - log_file.close() - end_time = datetime.utcnow() - self._report_command_result(command, start_time, end_time, result) - - if options.check and result.return_code != 0: - raise RuntimeError( - f"Command: {command}\nreturn code: {result.return_code}\n" f"Output: {result.stdout}\n" f"Stderr: {result.stderr}\n" - ) - return result - - def _exec_non_interactive(self, command: str, options: CommandOptions) -> CommandResult: - start_time = datetime.utcnow() - result = None - - try: - command_process = subprocess.run( - command, - check=options.check, - 
universal_newlines=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - timeout=options.timeout, - shell=True, - ) - - result = CommandResult( - stdout=command_process.stdout or "", - stderr="", - return_code=command_process.returncode, - ) - except subprocess.CalledProcessError as exc: - # TODO: always set check flag to false and capture command result normally - result = CommandResult( - stdout=exc.stdout or "", - stderr="", - return_code=exc.returncode, - ) - raise RuntimeError(f"Command: {command}\nError with retcode: {exc.returncode}\n Output: {exc.output}") from exc - except OSError as exc: - raise RuntimeError(f"Command: {command}\nOutput: {exc.strerror}") from exc - finally: - end_time = datetime.utcnow() - self._report_command_result(command, start_time, end_time, result) - return result - - def _get_pexpect_process_result(self, command_process: pexpect.spawn) -> CommandResult: - """ - Captures output of the process. - """ - # Wait for child process to end it's work - if command_process.isalive(): - command_process.expect(pexpect.EOF) - - # Close the process to obtain the exit code - command_process.close() - return_code = command_process.exitstatus - - # Capture output from the log file - log_file: IO[bytes] = command_process.logfile_read - log_file.seek(0) - output = log_file.read().decode() - - return CommandResult(stdout=output, stderr="", return_code=return_code) - - def _report_command_result( - self, - command: str, - start_time: datetime, - end_time: datetime, - result: Optional[CommandResult], - ) -> None: - if not result: - logger.warning(f"Command: {command}\n" f"Error: result is None") - return - - status, log_method = ("Success", logger.info) if result.return_code == 0 else ("Error", logger.warning) - log_method(f"Command: {command}\n" f"{status} with retcode {result.return_code}\n" f"Output: \n{result.stdout}") - - elapsed_time = end_time - start_time - command_attachment = ( - f"COMMAND: {command}\n" - f"RETCODE: {result.return_code}\n\n" - f"STDOUT:\n{result.stdout}\n" - f"STDERR:\n{result.stderr}\n" - f"Start / End / Elapsed\t {start_time} / {end_time} / {elapsed_time}" - ) - reporter.attach(command_attachment, "Command execution.txt") diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py deleted file mode 100644 index 3f13dca..0000000 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ /dev/null @@ -1,324 +0,0 @@ -import logging -import socket -import textwrap -from datetime import datetime -from functools import lru_cache, wraps -from time import sleep -from typing import ClassVar, Optional, Tuple - -from paramiko import AutoAddPolicy, Channel, ECDSAKey, Ed25519Key, PKey, RSAKey, SSHClient, SSHException, ssh_exception -from paramiko.ssh_exception import AuthenticationException - -from frostfs_testlib import reporter -from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell, SshCredentials - -logger = logging.getLogger("frostfs.testlib.shell") - - -class SshConnectionProvider: - SSH_CONNECTION_ATTEMPTS: ClassVar[int] = 4 - SSH_ATTEMPTS_INTERVAL: ClassVar[int] = 10 - CONNECTION_TIMEOUT = 60 - - instance = None - connections: dict[str, SSHClient] = {} - creds: dict[str, SshCredentials] = {} - - def __new__(cls): - if not cls.instance: - cls.instance = super(SshConnectionProvider, cls).__new__(cls) - return cls.instance - - def store_creds(self, host: str, ssh_creds: SshCredentials): - self.creds[host] = ssh_creds - - def provide(self, host: str, port: str) -> SSHClient: - if 
host not in self.creds: - raise RuntimeError(f"Please add credentials for host {host}") - - if host in self.connections: - client = self.connections[host] - if client: - return client - - creds = self.creds[host] - client = self._create_connection(host, port, creds) - self.connections[host] = client - return client - - def drop(self, host: str): - if host in self.connections: - client = self.connections.pop(host) - client.close() - - def drop_all(self): - hosts = list(self.connections.keys()) - for host in hosts: - self.drop(host) - - def _create_connection( - self, - host: str, - port: str, - creds: SshCredentials, - ) -> SSHClient: - for attempt in range(self.SSH_CONNECTION_ATTEMPTS): - connection = SSHClient() - connection.set_missing_host_key_policy(AutoAddPolicy()) - try: - if creds.ssh_key_path: - logger.info( - f"Trying to connect to host {host} as {creds.ssh_login} using SSH key " f"{creds.ssh_key_path} (attempt {attempt})" - ) - connection.connect( - hostname=host, - port=port, - username=creds.ssh_login, - pkey=_load_private_key(creds.ssh_key_path, creds.ssh_key_passphrase), - timeout=self.CONNECTION_TIMEOUT, - ) - else: - logger.info(f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})") - connection.connect( - hostname=host, - port=port, - username=creds.ssh_login, - password=creds.ssh_password, - timeout=self.CONNECTION_TIMEOUT, - ) - return connection - except AuthenticationException: - connection.close() - logger.exception(f"Can't connect to host {host}") - raise - except ( - SSHException, - ssh_exception.NoValidConnectionsError, - AttributeError, - socket.timeout, - OSError, - ) as exc: - connection.close() - can_retry = attempt + 1 < self.SSH_CONNECTION_ATTEMPTS - if can_retry: - logger.warn(f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. Error: {exc}") - sleep(self.SSH_ATTEMPTS_INTERVAL) - continue - logger.exception(f"Can't connect to host {host}") - raise HostIsNotAvailable(host) from exc - - -class HostIsNotAvailable(Exception): - """Raised when host is not reachable via SSH connection.""" - - def __init__(self, host: Optional[str] = None): - msg = f"Host {host} is not available" - super().__init__(msg) - - -def log_command(func): - @wraps(func) - def wrapper(shell: "SSHShell", command: str, options: CommandOptions, *args, **kwargs) -> CommandResult: - command_info = command.removeprefix("$ProgressPreference='SilentlyContinue'\n") - with reporter.step(command_info): - logger.info(f'Execute command "{command}" on "{shell.host}"') - - start_time = datetime.utcnow() - result = func(shell, command, options, *args, **kwargs) - end_time = datetime.utcnow() - - elapsed_time = end_time - start_time - log_message = ( - f"HOST: {shell.host}\n" - f"COMMAND:\n{textwrap.indent(command, ' ')}\n" - f"RC:\n {result.return_code}\n" - f"STDOUT:\n{textwrap.indent(result.stdout, ' ')}\n" - f"STDERR:\n{textwrap.indent(result.stderr, ' ')}\n" - f"Start / End / Elapsed\t {start_time} / {end_time} / {elapsed_time}" - ) - - if not options.no_log: - logger.info(log_message) - - reporter.attach(log_message, "SSH command.txt") - return result - - return wrapper - - -@lru_cache -def _load_private_key(file_path: str, password: Optional[str]) -> PKey: - """Loads private key from specified file. - - We support several type formats, however paramiko doesn't provide functionality to determine - key type in advance. 
So we attempt to load file with each of the supported formats and then - cache the result so that we don't need to figure out type again on subsequent calls. - """ - logger.debug(f"Loading ssh key from {file_path}") - for key_type in (Ed25519Key, ECDSAKey, RSAKey): - try: - return key_type.from_private_key_file(file_path, password) - except SSHException as ex: - logger.warn(f"SSH key {file_path} can't be loaded with {key_type}: {ex}") - continue - raise SSHException(f"SSH key {file_path} is not supported") - - -class SSHShell(Shell): - """Implements command shell on a remote machine via SSH connection.""" - - # Time in seconds to delay after remote command has completed. The delay is required - # to allow remote command to flush its output buffer - DELAY_AFTER_EXIT = 0.2 - - def __init__( - self, - host: str, - login: str, - password: Optional[str] = None, - private_key_path: Optional[str] = None, - private_key_passphrase: Optional[str] = None, - port: str = "22", - command_inspectors: Optional[list[CommandInspector]] = None, - custom_environment: Optional[dict] = None, - ) -> None: - super().__init__() - self.connection_provider = SshConnectionProvider() - self.connection_provider.store_creds(host, SshCredentials(login, password, private_key_path, private_key_passphrase)) - self.host = host - self.port = port - - self.command_inspectors = command_inspectors or [] - - self.environment = custom_environment - - @property - def _connection(self): - return self.connection_provider.provide(self.host, self.port) - - def drop(self): - self.connection_provider.drop(self.host) - - def exec(self, command: str, options: Optional[CommandOptions] = None) -> CommandResult: - options = options or CommandOptions() - - original_command = command - extra_inspectors = options.extra_inspectors if options.extra_inspectors else [] - for inspector in [*self.command_inspectors, *extra_inspectors]: - command = inspector.inspect(original_command, command) - - if options.interactive_inputs: - result = self._exec_interactive(command, options) - else: - result = self._exec_non_interactive(command, options) - - if options.check and result.return_code != 0: - raise RuntimeError(f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n") - return result - - @log_command - def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult: - stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, get_pty=True, environment=self.environment) - for interactive_input in options.interactive_inputs: - input = interactive_input.input - if not input.endswith("\n"): - input = f"{input}\n" - try: - stdin.write(input) - except OSError: - logger.exception(f"Error while feeding {input} into command {command}") - - if options.close_stdin: - stdin.close() - sleep(self.DELAY_AFTER_EXIT) - - decoded_stdout, decoded_stderr = self._read_channels(stdout.channel, stderr.channel) - return_code = stdout.channel.recv_exit_status() - - result = CommandResult( - stdout=decoded_stdout, - stderr=decoded_stderr, - return_code=return_code, - ) - return result - - @log_command - def _exec_non_interactive(self, command: str, options: CommandOptions) -> CommandResult: - try: - stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, environment=self.environment) - - if options.close_stdin: - stdin.close() - - decoded_stdout, decoded_stderr = self._read_channels(stdout.channel, stderr.channel) - return_code = 
stdout.channel.recv_exit_status() - - return CommandResult( - stdout=decoded_stdout, - stderr=decoded_stderr, - return_code=return_code, - ) - except ( - SSHException, - TimeoutError, - ssh_exception.NoValidConnectionsError, - ConnectionResetError, - AttributeError, - socket.timeout, - ) as exc: - logger.exception(f"Can't execute command {command} on host: {self.host}") - self.drop() - raise HostIsNotAvailable(self.host) from exc - - def _read_channels( - self, - stdout: Channel, - stderr: Channel, - chunk_size: int = 4096, - ) -> Tuple[str, str]: - """Reads data from stdout/stderr channels. - - Reading channels is required before we wait for exit status of the remote process. - Otherwise waiting step will hang indefinitely, see the warning from paramiko docs: - # https://docs.paramiko.org/en/stable/api/channel.html#paramiko.channel.Channel.recv_exit_status - - Args: - stdout: Channel of stdout stream of the remote process. - stderr: Channel of stderr stream of the remote process. - chunk_size: Max size of data chunk that we read from channel at a time. - - Returns: - Tuple with stdout and stderr channels decoded into strings. - """ - # We read data in chunks - stdout_chunks = [] - stderr_chunks = [] - - # Read from channels (if data is ready) until process exits - while not stdout.exit_status_ready(): - if stdout.recv_ready(): - stdout_chunks.append(stdout.recv(chunk_size)) - if stderr.recv_stderr_ready(): - stderr_chunks.append(stderr.recv_stderr(chunk_size)) - - # Wait for command to complete and flush its buffer before we read final output - sleep(self.DELAY_AFTER_EXIT) - - # Read the remaining data from the channels: - # If channel returns empty data chunk, it means that all data has been read - while True: - data_chunk = stdout.recv(chunk_size) - if not data_chunk: - break - stdout_chunks.append(data_chunk) - while True: - data_chunk = stderr.recv_stderr(chunk_size) - if not data_chunk: - break - stderr_chunks.append(data_chunk) - - # Combine chunks and decode results into regular strings - full_stdout = b"".join(stdout_chunks) - full_stderr = b"".join(stderr_chunks) - - return (full_stdout.decode(errors="ignore"), full_stderr.decode(errors="ignore")) diff --git a/src/frostfs_testlib/steps/__init__.py b/src/frostfs_testlib/steps/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/frostfs_testlib/steps/acl.py b/src/frostfs_testlib/steps/acl.py deleted file mode 100644 index da407b6..0000000 --- a/src/frostfs_testlib/steps/acl.py +++ /dev/null @@ -1,176 +0,0 @@ -import base64 -import json -import logging -import os -import uuid -from time import sleep -from typing import List, Optional, Union - -import base58 - -from frostfs_testlib import reporter -from frostfs_testlib.cli import FrostfsCli -from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC -from frostfs_testlib.resources.common import ASSETS_DIR -from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.dataclasses.acl import EACL_LIFETIME, FROSTFS_CONTRACT_CACHE_TIMEOUT, EACLPubKey, EACLRole, EACLRule -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo -from frostfs_testlib.utils import wallet_utils - -logger = logging.getLogger("NeoLogger") - - -@reporter.step("Get extended ACL") -def get_eacl(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str) -> Optional[str]: - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - try: - result = cli.container.get_eacl(rpc_endpoint=endpoint, cid=cid) - except RuntimeError as exc: - logger.info("Extended ACL table is 
not set for this container") - logger.info(f"Got exception while getting eacl: {exc}") - return None - if "extended ACL table is not set for this container" in result.stdout: - return None - return result.stdout - - -@reporter.step("Set extended ACL") -def set_eacl( - wallet: WalletInfo, - cid: str, - eacl_table_path: str, - shell: Shell, - endpoint: str, - session_token: Optional[str] = None, -) -> None: - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - cli.container.set_eacl( - rpc_endpoint=endpoint, - cid=cid, - table=eacl_table_path, - await_mode=True, - session=session_token, - ) - - -def _encode_cid_for_eacl(cid: str) -> str: - cid_base58 = base58.b58decode(cid) - return base64.b64encode(cid_base58).decode("utf-8") - - -def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str: - table_file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"eacl_table_{str(uuid.uuid4())}.json") - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC) - cli.acl.extended_create(cid=cid, out=table_file_path, rule=rules_list) - - with open(table_file_path, "r") as file: - table_data = file.read() - logger.info(f"Generated eACL:\n{table_data}") - - return table_file_path - - -def form_bearertoken_file( - wallet: WalletInfo, - cid: str, - eacl_rule_list: List[Union[EACLRule, EACLPubKey]], - shell: Shell, - endpoint: str, - sign: Optional[bool] = True, -) -> str: - """ - This function fetches eACL for given on behalf of , - then extends it with filters taken from , signs - with bearer token and writes to file - """ - enc_cid = _encode_cid_for_eacl(cid) if cid else None - file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) - - eacl = get_eacl(wallet, cid, shell, endpoint) - json_eacl = dict() - if eacl: - eacl = eacl.replace("eACL: ", "").split("Signature")[0] - json_eacl = json.loads(eacl) - logger.info(json_eacl) - eacl_result = { - "body": { - "eaclTable": {"containerID": {"value": enc_cid} if cid else enc_cid, "records": []}, - "lifetime": {"exp": EACL_LIFETIME, "nbf": "1", "iat": "0"}, - } - } - - assert eacl_rules, "Got empty eacl_records list" - for rule in eacl_rule_list: - op_data = { - "operation": rule.operation.value.upper(), - "action": rule.access.value.upper(), - "filters": rule.filters or [], - "targets": [], - } - - if isinstance(rule.role, EACLRole): - op_data["targets"] = [{"role": rule.role.value.upper()}] - elif isinstance(rule.role, EACLPubKey): - op_data["targets"] = [{"keys": rule.role.keys}] - - eacl_result["body"]["eaclTable"]["records"].append(op_data) - - # Add records from current eACL - if "records" in json_eacl.keys(): - for record in json_eacl["records"]: - eacl_result["body"]["eaclTable"]["records"].append(record) - - with open(file_path, "w", encoding="utf-8") as eacl_file: - json.dump(eacl_result, eacl_file, ensure_ascii=False, indent=4) - - logger.info(f"Got these extended ACL records: {eacl_result}") - if sign: - sign_bearer( - shell=shell, - wallet=wallet, - eacl_rules_file_from=file_path, - eacl_rules_file_to=file_path, - json=True, - ) - return file_path - - -def eacl_rules(access: str, verbs: list, user: str) -> list[str]: - """ - This function creates a list of eACL rules. 
- Args: - access (str): identifies if the following operation(s) - is allowed or denied - verbs (list): a list of operations to set rules for - user (str): a group of users (user/others) or a wallet of - a certain user for whom rules are set - Returns: - (list): a list of eACL rules - """ - if user not in ("others", "user"): - pubkey = wallet_utils.get_wallet_public_key(user, wallet_password="") - user = f"pubkey:{pubkey}" - - rules = [] - for verb in verbs: - rule = f"{access} {verb} {user}" - rules.append(rule) - return rules - - -def sign_bearer(shell: Shell, wallet: WalletInfo, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool) -> None: - frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - frostfscli.util.sign_bearer_token(eacl_rules_file_from, eacl_rules_file_to, json=json) - - -@reporter.step("Wait for eACL cache expired") -def wait_for_cache_expired(): - sleep(FROSTFS_CONTRACT_CACHE_TIMEOUT) - return - - -@reporter.step("Return bearer token in base64 to caller") -def bearer_token_base64_from_file(bearer_path: str) -> str: - with open(bearer_path, "rb") as file: - signed = file.read() - return base64.b64encode(signed).decode("utf-8") diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py deleted file mode 100644 index 092b1a3..0000000 --- a/src/frostfs_testlib/steps/cli/container.py +++ /dev/null @@ -1,351 +0,0 @@ -import json -import logging -import re -from dataclasses import dataclass -from time import sleep -from typing import Optional, Union - -from frostfs_testlib import reporter -from frostfs_testlib.cli import FrostfsCli -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC -from frostfs_testlib.shell import Shell -from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo -from frostfs_testlib.utils import json_utils -from frostfs_testlib.utils.file_utils import generate_file, get_file_hash - -logger = logging.getLogger("NeoLogger") - - -@dataclass -class StorageContainerInfo: - id: str - wallet: WalletInfo - - -class StorageContainer: - def __init__( - self, - storage_container_info: StorageContainerInfo, - shell: Shell, - cluster: Cluster, - ) -> None: - self.shell = shell - self.storage_container_info = storage_container_info - self.cluster = cluster - - def get_id(self) -> str: - return self.storage_container_info.id - - def get_wallet(self) -> str: - return self.storage_container_info.wallet - - @reporter.step("Generate new object and put in container") - def generate_object( - self, - size: int, - expire_at: Optional[int] = None, - bearer_token: Optional[str] = None, - endpoint: Optional[str] = None, - ) -> StorageObjectInfo: - with reporter.step(f"Generate object with size {size}"): - file_path = generate_file(size) - file_hash = get_file_hash(file_path) - - container_id = self.get_id() - wallet = self.get_wallet() - with reporter.step(f"Put object with size {size} to container {container_id}"): - if endpoint: - object_id = put_object( - wallet=wallet, - path=file_path, - cid=container_id, - expire_at=expire_at, - shell=self.shell, - endpoint=endpoint, - bearer=bearer_token, - ) - else: - object_id = put_object_to_random_node( - wallet=wallet, - path=file_path, - cid=container_id, - expire_at=expire_at, - shell=self.shell, - 
cluster=self.cluster, - bearer=bearer_token, - ) - - storage_object = StorageObjectInfo( - container_id, - object_id, - size=size, - wallet=wallet, - file_path=file_path, - file_hash=file_hash, - ) - - return storage_object - - -DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" -SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X" -REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" -DEFAULT_EC_PLACEMENT_RULE = "EC 3.1" - - -@reporter.step("Create Container") -def create_container( - wallet: WalletInfo, - shell: Shell, - endpoint: str, - rule: str = DEFAULT_PLACEMENT_RULE, - basic_acl: str = "", - attributes: Optional[dict] = None, - session_token: str = "", - name: Optional[str] = None, - options: Optional[dict] = None, - await_mode: bool = True, - wait_for_creation: bool = True, - nns_zone: Optional[str] = None, - nns_name: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -) -> str: - """ - A wrapper for `frostfs-cli container create` call. - - Args: - wallet (WalletInfo): a wallet on whose behalf a container is created - rule (optional, str): placement rule for container - basic_acl (optional, str): an ACL for container, will be - appended to `--basic-acl` key - attributes (optional, dict): container attributes , will be - appended to `--attributes` key - session_token (optional, str): a path to session token file - session_wallet(optional, str): a path to the wallet which signed - the session token; this parameter makes sense - when paired with `session_token` - shell: executor for cli command - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - options (optional, dict): any other options to pass to the call - name (optional, str): container name attribute - await_mode (bool): block execution until container is persisted - wait_for_creation (): Wait for container shows in container list - timeout: Timeout for the operation. 
- - Returns: - (str): CID of the created container - """ - - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - result = cli.container.create( - rpc_endpoint=endpoint, - policy=rule, - nns_name=nns_name, - nns_zone=nns_zone, - basic_acl=basic_acl, - attributes=attributes, - name=name, - session=session_token, - await_mode=await_mode, - timeout=timeout, - **options or {}, - ) - - cid = _parse_cid(result.stdout) - - logger.info("Container created; waiting until it is persisted in the sidechain") - - if wait_for_creation: - wait_for_container_creation(wallet, cid, shell, endpoint) - - return cid - - -def wait_for_container_creation(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1): - for _ in range(attempts): - containers = list_containers(wallet, shell, endpoint) - if cid in containers: - return - logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue") - sleep(sleep_interval) - raise RuntimeError(f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting") - - -def wait_for_container_deletion(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1): - for _ in range(attempts): - try: - get_container(wallet, cid, shell=shell, endpoint=endpoint) - sleep(sleep_interval) - continue - except Exception as err: - if "container not found" not in str(err): - raise AssertionError(f'Expected "container not found" in error, got\n{err}') - return - raise AssertionError(f"Expected container deleted during {attempts * sleep_interval} sec.") - - -@reporter.step("List Containers") -def list_containers(wallet: WalletInfo, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT) -> list[str]: - """ - A wrapper for `frostfs-cli container list` call. It returns all the - available containers for the given wallet. - Args: - wallet (WalletInfo): a wallet on whose behalf we list the containers - shell: executor for cli command - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - timeout: Timeout for the operation. - Returns: - (list): list of containers - """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - result = cli.container.list(rpc_endpoint=endpoint, timeout=timeout) - return result.stdout.split() - - -@reporter.step("List Objects in container") -def list_objects( - wallet: WalletInfo, - shell: Shell, - container_id: str, - endpoint: str, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -) -> list[str]: - """ - A wrapper for `frostfs-cli container list-objects` call. It returns all the - available objects in container. - Args: - wallet (WalletInfo): a wallet on whose behalf we list the containers objects - shell: executor for cli command - container_id: cid of container - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - timeout: Timeout for the operation. - Returns: - (list): list of containers - """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - result = cli.container.list_objects(rpc_endpoint=endpoint, cid=container_id, timeout=timeout) - logger.info(f"Container objects: \n{result}") - return result.stdout.split() - - -@reporter.step("Get Container") -def get_container( - wallet: WalletInfo, - cid: str, - shell: Shell, - endpoint: str, - json_mode: bool = True, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -) -> Union[dict, str]: - """ - A wrapper for `frostfs-cli container get` call. 
It extracts container's - attributes and rearranges them into a more compact view. - Args: - wallet (WalletInfo): path to a wallet on whose behalf we get the container - cid (str): ID of the container to get - shell: executor for cli command - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - json_mode (bool): return container in JSON format - timeout: Timeout for the operation. - Returns: - (dict, str): dict of container attributes - """ - - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - result = cli.container.get(rpc_endpoint=endpoint, cid=cid, json_mode=json_mode, timeout=timeout) - - if not json_mode: - return result.stdout - - container_info = json.loads(result.stdout) - attributes = dict() - for attr in container_info["attributes"]: - attributes[attr["key"]] = attr["value"] - container_info["attributes"] = attributes - container_info["ownerID"] = json_utils.json_reencode(container_info["ownerID"]["value"]) - return container_info - - -@reporter.step("Delete Container") -# TODO: make the error message about a non-found container more user-friendly -def delete_container( - wallet: WalletInfo, - cid: str, - shell: Shell, - endpoint: str, - force: bool = False, - session_token: Optional[str] = None, - await_mode: bool = False, -) -> None: - """ - A wrapper for `frostfs-cli container delete` call. - Args: - await_mode: Block execution until container is removed. - wallet (WalletInfo): path to a wallet on whose behalf we delete the container - cid (str): ID of the container to delete - shell: executor for cli command - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - force (bool): do not check whether container contains locks and remove immediately - session_token: a path to session token file - This function doesn't return anything. - """ - - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - cli.container.delete( - cid=cid, - rpc_endpoint=endpoint, - force=force, - session=session_token, - await_mode=await_mode, - ) - - -def _parse_cid(output: str) -> str: - """ - Parses container ID from a given CLI output. The input string we expect: - container ID: 2tz86kVTDpJxWHrhw3h6PbKMwkLtBEwoqhHQCKTre1FN - awaiting... - container has been persisted on sidechain - We want to take 'container ID' value from the string. 
- - Args: - output (str): CLI output to parse - - Returns: - (str): extracted CID - """ - try: - # taking first line from command's output - first_line = output.split("\n")[0] - except Exception: - first_line = "" - logger.error(f"Got empty output: {output}") - splitted = first_line.split(": ") - if len(splitted) != 2: - raise ValueError(f"no CID was parsed from command output: \t{first_line}") - return splitted[1] - - -@reporter.step("Search for nodes with a container") -def search_nodes_with_container( - wallet: WalletInfo, - cid: str, - shell: Shell, - endpoint: str, - cluster: Cluster, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -) -> list[ClusterNode]: - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - result = cli.container.search_node(rpc_endpoint=endpoint, cid=cid, timeout=timeout) - - pattern = r"[0-9]+(?:\.[0-9]+){3}" - nodes_ip = list(set(re.findall(pattern, result.stdout))) - - with reporter.step(f"nodes ips = {nodes_ip}"): - nodes_list = cluster.get_nodes_by_ip(nodes_ip) - - with reporter.step(f"Return nodes - {nodes_list}"): - return nodes_list diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py deleted file mode 100644 index 7f8391d..0000000 --- a/src/frostfs_testlib/steps/cli/object.py +++ /dev/null @@ -1,762 +0,0 @@ -import json -import logging -import os -import re -import uuid -from typing import Any, Optional - -from frostfs_testlib import reporter -from frostfs_testlib.cli import FrostfsCli -from frostfs_testlib.cli.neogo import NeoGo -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE -from frostfs_testlib.resources.common import ASSETS_DIR -from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo -from frostfs_testlib.testing import wait_for_success -from frostfs_testlib.utils import json_utils -from frostfs_testlib.utils.cli_utils import parse_netmap_output -from frostfs_testlib.utils.file_utils import TestFile - -logger = logging.getLogger("NeoLogger") - - -@reporter.step("Get object from random node") -def get_object_from_random_node( - wallet: WalletInfo, - cid: str, - oid: str, - shell: Shell, - cluster: Cluster, - bearer: Optional[str] = None, - write_object: Optional[str] = None, - xhdr: Optional[dict] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -) -> str: - """ - GET from FrostFS random storage node - - Args: - wallet: wallet on whose behalf GET is done - cid: ID of Container where we get the Object from - oid: Object ID - shell: executor for cli command - cluster: cluster object - bearer (optional, str): path to Bearer Token file, appends to `--bearer` key - write_object (optional, str): path to downloaded file, appends to `--file` key - no_progress(optional, bool): do not show progress bar - xhdr (optional, dict): Request X-Headers in form of Key=Value - session (optional, dict): path to a JSON-encoded container session token - timeout: Timeout for the operation. 
- Returns: - (str): path to downloaded file - """ - endpoint = cluster.get_random_storage_rpc_endpoint() - return get_object( - wallet, - cid, - oid, - shell, - endpoint, - bearer, - write_object, - xhdr, - no_progress, - session, - timeout, - ) - - -@reporter.step("Get object from {endpoint}") -def get_object( - wallet: WalletInfo, - cid: str, - oid: str, - shell: Shell, - endpoint: str, - bearer: Optional[str] = None, - write_object: Optional[str] = None, - xhdr: Optional[dict] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -) -> TestFile: - """ - GET from FrostFS. - - Args: - wallet (WalletInfo): wallet on whose behalf GET is done - cid (str): ID of Container where we get the Object from - oid (str): Object ID - shell: executor for cli command - bearer: path to Bearer Token file, appends to `--bearer` key - write_object: path to downloaded file, appends to `--file` key - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - no_progress(optional, bool): do not show progress bar - xhdr (optional, dict): Request X-Headers in form of Key=Value - session (optional, dict): path to a JSON-encoded container session token - timeout: Timeout for the operation. - Returns: - (str): path to downloaded file - """ - - if not write_object: - write_object = str(uuid.uuid4()) - test_file = TestFile(os.path.join(ASSETS_DIR, write_object)) - - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - cli.object.get( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - file=test_file, - bearer=bearer, - no_progress=no_progress, - xhdr=xhdr, - session=session, - timeout=timeout, - ) - - return test_file - - -@reporter.step("Get Range Hash from {endpoint}") -def get_range_hash( - wallet: WalletInfo, - cid: str, - oid: str, - range_cut: str, - shell: Shell, - endpoint: str, - bearer: Optional[str] = None, - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -): - """ - GETRANGEHASH of given Object. - - Args: - wallet: wallet on whose behalf GETRANGEHASH is done - cid: ID of Container where we get the Object from - oid: Object ID - shell: executor for cli command - bearer: path to Bearer Token file, appends to `--bearer` key - range_cut: Range to take hash from in the form offset1:length1,..., - value to pass to the `--range` parameter - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - xhdr: Request X-Headers in form of Key=Values - session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session. - timeout: Timeout for the operation. - Returns: - None - """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - result = cli.object.hash( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - range=range_cut, - bearer=bearer, - xhdr=xhdr, - session=session, - timeout=timeout, - ) - - # cutting off output about range offset and length - return result.stdout.split(":")[1].strip() - - -@reporter.step("Put object to random node") -def put_object_to_random_node( - wallet: WalletInfo, - path: str, - cid: str, - shell: Shell, - cluster: Cluster, - bearer: Optional[str] = None, - copies_number: Optional[int] = None, - attributes: Optional[dict] = None, - xhdr: Optional[dict] = None, - expire_at: Optional[int] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -): - """ - PUT of given file to a random storage node. 
- - Args: - wallet: wallet on whose behalf PUT is done - path: path to file to be PUT - cid: ID of Container where we get the Object from - shell: executor for cli command - cluster: cluster under test - bearer: path to Bearer Token file, appends to `--bearer` key - copies_number: Number of copies of the object to store within the RPC call - attributes: User attributes in form of Key1=Value1,Key2=Value2 - cluster: cluster under test - no_progress: do not show progress bar - expire_at: Last epoch in the life of the object - xhdr: Request X-Headers in form of Key=Value - session: path to a JSON-encoded container session token - timeout: Timeout for the operation. - Returns: - ID of uploaded Object - """ - - endpoint = cluster.get_random_storage_rpc_endpoint() - return put_object( - wallet, - path, - cid, - shell, - endpoint, - bearer, - copies_number, - attributes, - xhdr, - expire_at, - no_progress, - session, - timeout=timeout, - ) - - -@reporter.step("Put object at {endpoint} in container {cid}") -def put_object( - wallet: WalletInfo, - path: str, - cid: str, - shell: Shell, - endpoint: str, - bearer: Optional[str] = None, - copies_number: Optional[int] = None, - attributes: Optional[dict] = None, - xhdr: Optional[dict] = None, - expire_at: Optional[int] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -): - """ - PUT of given file. - - Args: - wallet: wallet on whose behalf PUT is done - path: path to file to be PUT - cid: ID of Container where we get the Object from - shell: executor for cli command - bearer: path to Bearer Token file, appends to `--bearer` key - copies_number: Number of copies of the object to store within the RPC call - attributes: User attributes in form of Key1=Value1,Key2=Value2 - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - no_progress: do not show progress bar - expire_at: Last epoch in the life of the object - xhdr: Request X-Headers in form of Key=Value - session: path to a JSON-encoded container session token - timeout: Timeout for the operation. - Returns: - (str): ID of uploaded Object - """ - - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - result = cli.object.put( - rpc_endpoint=endpoint, - file=path, - cid=cid, - attributes=attributes, - bearer=bearer, - copies_number=copies_number, - expire_at=expire_at, - no_progress=no_progress, - xhdr=xhdr, - session=session, - timeout=timeout, - ) - - # Splitting CLI output to separate lines and taking the penultimate line - id_str = result.stdout.strip().split("\n")[-2] - oid = id_str.split(":")[1] - return oid.strip() - - -@reporter.step("Delete object {cid}/{oid} from {endpoint}") -def delete_object( - wallet: WalletInfo, - cid: str, - oid: str, - shell: Shell, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -): - """ - DELETE an Object. - - Args: - wallet: wallet on whose behalf DELETE is done - cid: ID of Container where we get the Object from - oid: ID of Object we are going to delete - shell: executor for cli command - bearer: path to Bearer Token file, appends to `--bearer` key - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - xhdr: Request X-Headers in form of Key=Value - session: path to a JSON-encoded container session token - timeout: Timeout for the operation. 
- Returns: - (str): Tombstone ID - """ - - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - result = cli.object.delete( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - bearer=bearer, - xhdr=xhdr, - session=session, - timeout=timeout, - ) - - id_str = result.stdout.split("\n")[1] - tombstone = id_str.split(":")[1] - return tombstone.strip() - - -@reporter.step("Get Range") -def get_range( - wallet: WalletInfo, - cid: str, - oid: str, - range_cut: str, - shell: Shell, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -): - """ - GETRANGE an Object. - - Args: - wallet: wallet on whose behalf GETRANGE is done - cid: ID of Container where we get the Object from - oid: ID of Object we are going to request - range_cut: range to take data from in the form offset:length - shell: executor for cli command - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - bearer: path to Bearer Token file, appends to `--bearer` key - xhdr: Request X-Headers in form of Key=Value - session: path to a JSON-encoded container session token - timeout: Timeout for the operation. - Returns: - (str, bytes) - path to the file with range content and content of this file as bytes - """ - test_file = TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4()))) - - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - cli.object.range( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - range=range_cut, - file=test_file, - bearer=bearer, - xhdr=xhdr, - session=session, - timeout=timeout, - ) - - with open(test_file, "rb") as file: - content = file.read() - return test_file, content - - -@reporter.step("Lock Object") -def lock_object( - wallet: WalletInfo, - cid: str, - oid: str, - shell: Shell, - endpoint: str, - lifetime: Optional[int] = None, - expire_at: Optional[int] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -) -> str: - """ - Locks object in container. - - Args: - address: Address of wallet account. - bearer: File with signed JSON or binary encoded bearer token. - cid: Container ID. - oid: Object ID. - lifetime: Lock lifetime. - expire_at: Lock expiration epoch. - shell: executor for cli command - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - session: Path to a JSON-encoded container session token. - ttl: TTL value in request meta header (default 2). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for the operation. 
- - Returns: - Lock object ID - """ - - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - result = cli.object.lock( - rpc_endpoint=endpoint, - lifetime=lifetime, - expire_at=expire_at, - address=address, - cid=cid, - oid=oid, - bearer=bearer, - xhdr=xhdr, - session=session, - ttl=ttl, - timeout=timeout, - ) - - # Splitting CLI output to separate lines and taking the penultimate line - id_str = result.stdout.strip().split("\n")[0] - oid = id_str.split(":")[1] - return oid.strip() - - -@reporter.step("Search object") -def search_object( - wallet: WalletInfo, - cid: str, - shell: Shell, - endpoint: str, - bearer: str = "", - filters: Optional[dict] = None, - expected_objects_list: Optional[list] = None, - xhdr: Optional[dict] = None, - session: Optional[str] = None, - phy: bool = False, - root: bool = False, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -) -> list: - """ - SEARCH an Object. - - Args: - wallet: wallet on whose behalf SEARCH is done - cid: ID of Container where we get the Object from - shell: executor for cli command - bearer: path to Bearer Token file, appends to `--bearer` key - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - filters: key=value pairs to filter Objects - expected_objects_list: a list of ObjectIDs to compare found Objects with - xhdr: Request X-Headers in form of Key=Value - session: path to a JSON-encoded container session token - phy: Search physically stored objects. - root: Search for user objects. - timeout: Timeout for the operation. - - Returns: - list of found ObjectIDs - """ - - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - result = cli.object.search( - rpc_endpoint=endpoint, - cid=cid, - bearer=bearer, - xhdr=xhdr, - filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None, - session=session, - phy=phy, - root=root, - timeout=timeout, - ) - - found_objects = re.findall(r"(\w{43,44})", result.stdout) - - if expected_objects_list: - if sorted(found_objects) == sorted(expected_objects_list): - logger.info(f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'") - else: - logger.warning(f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'") - - return found_objects - - -@reporter.step("Get netmap netinfo") -def get_netmap_netinfo( - wallet: WalletInfo, - shell: Shell, - endpoint: str, - address: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -) -> dict[str, Any]: - """ - Get netmap netinfo output from node - - Args: - wallet (WalletInfo): wallet on whose behalf request is done - shell: executor for cli command - endpoint (optional, str): FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - address: Address of wallet account - ttl: TTL value in request meta header (default 2) - wallet: Path to the wallet or binary key - xhdr: Request X-Headers in form of Key=Value - timeout: Timeout for the operation. 
- - Returns: - (dict): dict of parsed command output - """ - - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - output = cli.netmap.netinfo( - rpc_endpoint=endpoint, - address=address, - ttl=ttl, - xhdr=xhdr, - timeout=timeout, - ) - - settings = dict() - - patterns = [ - (re.compile("(.*): (\d+)"), int), - (re.compile("(.*): (false|true)"), bool), - (re.compile("(.*): (\d+\.\d+)"), float), - ] - for pattern, func in patterns: - for setting, value in re.findall(pattern, output.stdout): - settings[setting.lower().strip().replace(" ", "_")] = func(value) - - return settings - - -@reporter.step("Head object") -def head_object( - wallet: WalletInfo, - cid: str, - oid: str, - shell: Shell, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - json_output: bool = True, - is_raw: bool = False, - is_direct: bool = False, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -): - """ - HEAD an Object. - - Args: - wallet (WalletInfo): wallet on whose behalf HEAD is done - cid (str): ID of Container where we get the Object from - oid (str): ObjectID to HEAD - shell: executor for cli command - bearer (optional, str): path to Bearer Token file, appends to `--bearer` key - endpoint(optional, str): FrostFS endpoint to send request to - json_output(optional, bool): return response in JSON format or not; this flag - turns into `--json` key - is_raw(optional, bool): send "raw" request or not; this flag - turns into `--raw` key - is_direct(optional, bool): send request directly to the node or not; this flag - turns into `--ttl 1` key - xhdr (optional, dict): Request X-Headers in form of Key=Value - session (optional, dict): path to a JSON-encoded container session token - timeout: Timeout for the operation. - Returns: - depending on the `json_output` parameter value, the function returns - (dict): HEAD response in JSON format - or - (str): HEAD response as a plain text - """ - - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - result = cli.object.head( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - bearer=bearer, - json_mode=json_output, - raw=is_raw, - ttl=1 if is_direct else None, - xhdr=xhdr, - session=session, - timeout=timeout, - ) - - if not json_output: - return result - - try: - decoded = json.loads(result.stdout) - except Exception as exc: - # If we failed to parse output as JSON, the cause might be - # the plain text string in the beginning of the output. - # Here we cut off first string and try to parse again. 
- logger.info(f"failed to parse output: {exc}") - logger.info("parsing output in another way") - fst_line_idx = result.stdout.find("\n") - decoded = json.loads(result.stdout[fst_line_idx:]) - - # if response - if "chunks" in decoded.keys(): - logger.info("decoding ec chunks") - return decoded["chunks"] - - # If response is Complex Object header, it has `splitId` key - if "splitId" in decoded.keys(): - return json_utils.decode_split_header(decoded) - - # If response is Last or Linking Object header, - # it has `header` dictionary and non-null `split` dictionary - if "split" in decoded["header"].keys(): - if decoded["header"]["split"]: - return json_utils.decode_linking_object(decoded) - - if decoded["header"]["objectType"] == "STORAGE_GROUP": - return json_utils.decode_storage_group(decoded) - - if decoded["header"]["objectType"] == "TOMBSTONE": - return json_utils.decode_tombstone(decoded) - - return json_utils.decode_simple_header(decoded) - - -@reporter.step("Run neo-go dump-keys") -def neo_go_dump_keys(shell: Shell, wallet: WalletInfo) -> dict: - """ - Run neo-go dump keys command - - Args: - shell: executor for cli command - wallet: wallet path to dump from - Returns: - dict Address:Wallet Key - """ - neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) - output = neogo.wallet.dump_keys(wallet=wallet).stdout - first_line = "" - try: - # taking first line from command's output contain wallet address - first_line = output.split("\n")[0] - except Exception: - logger.error(f"Got empty output (neo-go dump keys): {output}") - address_id = first_line.split()[0] - # taking second line from command's output contain wallet key - wallet_key = output.split("\n")[1] - return {address_id: wallet_key} - - -@reporter.step("Run neo-go query height") -def neo_go_query_height(shell: Shell, endpoint: str) -> dict: - """ - Run neo-go query height command - - Args: - shell: executor for cli command - endpoint: endpoint to execute - Returns: - dict-> - Latest block: {value} - Validated state: {value} - - """ - neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) - output = neogo.query.height(rpc_endpoint=endpoint).stdout - first_line = "" - try: - # taking first line from command's output contain the latest block in blockchain - first_line = output.split("\n")[0] - except Exception: - logger.error(f"Got empty output (neo-go query height): {output}") - latest_block = first_line.split(":") - # taking second line from command's output contain wallet key - second_line = output.split("\n")[1] - if second_line != "": - validated_state = second_line.split(":") - return { - latest_block[0].replace(":", ""): int(latest_block[1]), - validated_state[0].replace(":", ""): int(validated_state[1]), - } - return {latest_block[0].replace(":", ""): int(latest_block[1])} - - -@wait_for_success() -@reporter.step("Search object nodes") -def get_object_nodes( - cluster: Cluster, - cid: str, - oid: str, - alive_node: ClusterNode, - bearer: str = "", - xhdr: Optional[dict] = None, - is_direct: bool = False, - verify_presence_all: bool = False, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -) -> list[ClusterNode]: - shell = alive_node.host.get_shell() - endpoint = alive_node.storage_node.get_rpc_endpoint() - wallet = alive_node.storage_node.get_remote_wallet_path() - wallet_config = alive_node.storage_node.get_remote_wallet_config_path() - - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config) - - response = cli.object.nodes( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - bearer=bearer, - ttl=1 if is_direct else None, 
- json=True, - xhdr=xhdr, - timeout=timeout, - verify_presence_all=verify_presence_all, - ) - - response_json = json.loads(response.stdout) - # Currently, the command will show expected and confirmed nodes. - # And we (currently) count only nodes which are both expected and confirmed - object_nodes_id = { - required_node - for data_object in response_json["data_objects"] - for required_node in data_object["required_nodes"] - if required_node in data_object["confirmed_nodes"] - } - - netmap_nodes_list = parse_netmap_output( - cli.netmap.snapshot( - rpc_endpoint=endpoint, - wallet=wallet, - ).stdout - ) - netmap_nodes = [ - netmap_node for object_node in object_nodes_id for netmap_node in netmap_nodes_list if object_node == netmap_node.node_id - ] - - object_nodes = [ - cluster_node - for netmap_node in netmap_nodes - for cluster_node in cluster.cluster_nodes - if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT) - ] - - return object_nodes diff --git a/src/frostfs_testlib/steps/cli/tree.py b/src/frostfs_testlib/steps/cli/tree.py deleted file mode 100644 index 4b0dfb3..0000000 --- a/src/frostfs_testlib/steps/cli/tree.py +++ /dev/null @@ -1,35 +0,0 @@ -import logging -from typing import Optional - -from frostfs_testlib import reporter -from frostfs_testlib.cli import FrostfsCli -from frostfs_testlib.plugins import load_plugin -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC -from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo - -logger = logging.getLogger("NeoLogger") - - - -@reporter.step("Get Tree List") -def get_tree_list( - wallet: WalletInfo, - cid: str, - shell: Shell, - endpoint: str, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -) -> None: - """ - A wrapper for `frostfs-cli tree list` call. - Args: - wallet (WalletInfo): path to a wallet on whose behalf we delete the container - cid (str): ID of the container to delete - shell: executor for cli command - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - timeout: Timeout for the operation. - This function doesn't return anything. - """ - - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - cli.tree.list(cid=cid, rpc_endpoint=endpoint, timeout=timeout) diff --git a/src/frostfs_testlib/steps/complex_object_actions.py b/src/frostfs_testlib/steps/complex_object_actions.py deleted file mode 100644 index e1a7088..0000000 --- a/src/frostfs_testlib/steps/complex_object_actions.py +++ /dev/null @@ -1,207 +0,0 @@ -#!/usr/bin/python3 - -""" - This module contains functions which are used for Large Object assembling: - getting Last Object and split and getting Link Object. It is not enough to - simply perform a "raw" HEAD request. - Therefore, the reliable retrieval of the aforementioned objects must be done - this way: send direct "raw" HEAD request to the every Storage Node and return - the desired OID on first non-null response. 
-""" - -import logging -from typing import Optional, Tuple - -from frostfs_testlib import reporter -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.shell import Shell -from frostfs_testlib.steps.cli.object import head_object -from frostfs_testlib.storage.cluster import Cluster, StorageNode -from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo - -logger = logging.getLogger("NeoLogger") - - -def get_storage_object_chunks( - storage_object: StorageObjectInfo, - shell: Shell, - cluster: Cluster, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -) -> list[str]: - """ - Get complex object split objects ids (no linker object) - - Args: - storage_object: storage_object to get it's chunks - shell: client shell to do cmd requests - cluster: cluster object under test - timeout: Timeout for an operation. - - Returns: - list of object ids of complex object chunks - """ - - with reporter.step(f"Get complex object chunks (f{storage_object.oid})"): - split_object_id = get_link_object( - storage_object.wallet, - storage_object.cid, - storage_object.oid, - shell, - cluster.services(StorageNode), - is_direct=False, - timeout=timeout, - ) - head = head_object( - storage_object.wallet, - storage_object.cid, - split_object_id, - shell, - cluster.default_rpc_endpoint, - timeout=timeout, - ) - - chunks_object_ids = [] - if "split" in head["header"] and "children" in head["header"]["split"]: - chunks_object_ids = head["header"]["split"]["children"] - - return chunks_object_ids - - -def get_complex_object_split_ranges( - storage_object: StorageObjectInfo, - shell: Shell, - cluster: Cluster, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -) -> list[Tuple[int, int]]: - - """ - Get list of split ranges tuples (offset, length) of a complex object - For example if object size if 100 and max object size in system is 30 - the returned list should be - [(0, 30), (30, 30), (60, 30), (90, 10)] - - Args: - storage_object: storage_object to get it's chunks - shell: client shell to do cmd requests - cluster: cluster object under test - timeout: Timeout for an operation. - - Returns: - list of object ids of complex object chunks - """ - - ranges: list = [] - offset = 0 - chunks_ids = get_storage_object_chunks(storage_object, shell, cluster) - for chunk_id in chunks_ids: - head = head_object( - storage_object.wallet, - storage_object.cid, - chunk_id, - shell, - cluster.default_rpc_endpoint, - timeout=timeout, - ) - - length = int(head["header"]["payloadLength"]) - ranges.append((offset, length)) - - offset = offset + length - - return ranges - - -@reporter.step("Get Link Object") -def get_link_object( - wallet: WalletInfo, - cid: str, - oid: str, - shell: Shell, - nodes: list[StorageNode], - bearer: str = "", - is_direct: bool = True, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -): - """ - Args: - wallet (str): path to the wallet on whose behalf the Storage Nodes - are requested - cid (str): Container ID which stores the Large Object - oid (str): Large Object ID - shell: executor for cli command - nodes: list of nodes to do search on - bearer (optional, str): path to Bearer token file - wallet_config (optional, str): path to the frostfs-cli config file - is_direct: send request directly to the node or not; this flag - turns into `--ttl 1` key - timeout: Timeout for an operation. 
- Returns: - (str): Link Object ID - When no Link Object ID is found after all Storage Nodes polling, - the function throws an error. - """ - for node in nodes: - endpoint = node.get_rpc_endpoint() - try: - resp = head_object( - wallet, - cid, - oid, - shell=shell, - endpoint=endpoint, - is_raw=True, - is_direct=is_direct, - bearer=bearer, - timeout=timeout, - ) - if resp["link"]: - return resp["link"] - except Exception: - logger.info(f"No Link Object found on {endpoint}; continue") - logger.error(f"No Link Object for {cid}/{oid} found among all Storage Nodes") - return None - - -@reporter.step("Get Last Object") -def get_last_object( - wallet: WalletInfo, - cid: str, - oid: str, - shell: Shell, - nodes: list[StorageNode], - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -) -> Optional[str]: - """ - Args: - wallet (str): path to the wallet on whose behalf the Storage Nodes - are requested - cid (str): Container ID which stores the Large Object - oid (str): Large Object ID - shell: executor for cli command - nodes: list of nodes to do search on - timeout: Timeout for an operation. - Returns: - (str): Last Object ID - When no Last Object ID is found after all Storage Nodes polling, - the function throws an error. - """ - for node in nodes: - endpoint = node.get_rpc_endpoint() - try: - resp = head_object( - wallet, - cid, - oid, - shell=shell, - endpoint=endpoint, - is_raw=True, - is_direct=True, - timeout=timeout, - ) - if resp["lastPart"]: - return resp["lastPart"] - except Exception: - logger.info(f"No Last Object found on {endpoint}; continue") - logger.error(f"No Last Object for {cid}/{oid} found among all Storage Nodes") - return None diff --git a/src/frostfs_testlib/steps/epoch.py b/src/frostfs_testlib/steps/epoch.py deleted file mode 100644 index 6ec5483..0000000 --- a/src/frostfs_testlib/steps/epoch.py +++ /dev/null @@ -1,125 +0,0 @@ -import logging -from time import sleep -from typing import Optional - -from frostfs_testlib import reporter -from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE -from frostfs_testlib.resources.common import MORPH_BLOCK_TIME -from frostfs_testlib.shell import Shell -from frostfs_testlib.steps.payment_neogo import get_contract_hash -from frostfs_testlib.storage.cluster import Cluster, StorageNode -from frostfs_testlib.storage.dataclasses.frostfs_services import InnerRing, MorphChain -from frostfs_testlib.testing.test_control import wait_for_success -from frostfs_testlib.utils import datetime_utils, wallet_utils - -logger = logging.getLogger("NeoLogger") - - -@reporter.step("Get epochs from nodes") -def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]: - """ - Get current epochs on each node. - - Args: - shell: shell to run commands on. - cluster: cluster under test. - - Returns: - Dict of {node_ip: epoch}. 
- """ - epochs_by_node = {} - for node in cluster.services(StorageNode): - epochs_by_node[node.host.config.address] = get_epoch(shell, cluster, node) - return epochs_by_node - - -@reporter.step("Ensure fresh epoch") -def ensure_fresh_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None) -> int: - # ensure new fresh epoch to avoid epoch switch during test session - alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] - current_epoch = get_epoch(shell, cluster, alive_node) - tick_epoch(shell, cluster, alive_node) - epoch = get_epoch(shell, cluster, alive_node) - assert epoch > current_epoch, "Epoch wasn't ticked" - return epoch - - -@reporter.step("Wait up to {timeout} seconds for nodes on cluster to align epochs") -def wait_for_epochs_align(shell: Shell, cluster: Cluster, timeout=60): - @wait_for_success(timeout, 5, None, True) - def check_epochs(): - epochs_by_node = get_epochs_from_nodes(shell, cluster) - assert len(set(epochs_by_node.values())) == 1, f"unaligned epochs found: {epochs_by_node}" - - check_epochs() - - -@reporter.step("Get Epoch") -def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None): - alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] - endpoint = alive_node.get_rpc_endpoint() - wallet_path = alive_node.get_wallet_path() - wallet_config = alive_node.get_wallet_config_path() - - cli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config) - - epoch = cli.netmap.epoch(endpoint, wallet_path, timeout=CLI_DEFAULT_TIMEOUT) - return int(epoch.stdout) - - -@reporter.step("Tick Epoch") -def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None, delta: Optional[int] = None): - """ - Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv) - Args: - shell: local shell to make queries about current epoch. 
Remote shell will be used to tick new one - cluster: cluster instance under test - alive_node: node to send requests to (first node in cluster by default) - """ - - alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] - remote_shell = alive_node.host.get_shell() - - if "force_transactions" not in alive_node.host.config.attributes: - # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests) - frostfs_adm = FrostfsAdm( - shell=remote_shell, - frostfs_adm_exec_path=FROSTFS_ADM_EXEC, - config_file=FROSTFS_ADM_CONFIG_PATH, - ) - frostfs_adm.morph.force_new_epoch(delta=delta) - return - - # Otherwise we tick epoch using transaction - cur_epoch = get_epoch(shell, cluster) - - if delta: - next_epoch = cur_epoch + delta - else: - next_epoch = cur_epoch + 1 - - # Use first node by default - ir_node = cluster.services(InnerRing)[0] - # In case if no local_wallet_path is provided, we use wallet_path - ir_wallet_path = ir_node.get_wallet_path() - ir_wallet_pass = ir_node.get_wallet_password() - ir_address = wallet_utils.get_last_address_from_wallet(ir_wallet_path, ir_wallet_pass) - - morph_chain = cluster.services(MorphChain)[0] - morph_endpoint = morph_chain.get_endpoint() - - neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) - neogo.contract.invokefunction( - wallet=ir_wallet_path, - wallet_password=ir_wallet_pass, - scripthash=get_contract_hash(morph_chain, "netmap.frostfs", shell=shell), - method="newEpoch", - arguments=f"int:{next_epoch}", - multisig_hash=f"{ir_address}:Global", - address=ir_address, - rpc_endpoint=morph_endpoint, - force=True, - gas=1, - ) - sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) diff --git a/src/frostfs_testlib/steps/http_gate.py b/src/frostfs_testlib/steps/http_gate.py deleted file mode 100644 index aa4abf2..0000000 --- a/src/frostfs_testlib/steps/http_gate.py +++ /dev/null @@ -1,370 +0,0 @@ -import logging -import os -import random -import re -import shutil -import uuid -import zipfile -from typing import Optional -from urllib.parse import quote_plus - -import requests - -from frostfs_testlib import reporter -from frostfs_testlib.cli import GenericCli -from frostfs_testlib.clients.s3.aws_cli_client import command_options -from frostfs_testlib.resources.common import ASSETS_DIR, SIMPLE_OBJECT_SIZE -from frostfs_testlib.shell import Shell -from frostfs_testlib.shell.local_shell import LocalShell -from frostfs_testlib.steps.cli.object import get_object -from frostfs_testlib.steps.storage_policy import get_nodes_without_object -from frostfs_testlib.storage.cluster import ClusterNode, StorageNode -from frostfs_testlib.testing.test_control import retry -from frostfs_testlib.utils.file_utils import TestFile, get_file_hash - -logger = logging.getLogger("NeoLogger") - -local_shell = LocalShell() - - -@reporter.step("Get via HTTP Gate") -def get_via_http_gate( - cid: str, - oid: str, - node: ClusterNode, - request_path: Optional[str] = None, - presigned_url: Optional[str] = None, - timeout: Optional[int] = 300, -): - """ - This function gets given object from HTTP gate - cid: container id to get object from - oid: object id / object key - node: node to make request - request_path: (optional) http request, if ommited - use default [{endpoint}/get/{cid}/{oid}] - """ - - request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" - if request_path: - request = f"{node.http_gate.get_endpoint()}{request_path}" - - if presigned_url: - request = presigned_url - - response = requests.get(request, stream=True, 
timeout=timeout, verify=False) - - if not response.ok: - raise Exception( - f"""Failed to get object via HTTP gate: - request: {response.request.path_url}, - response: {response.text}, - headers: {response.headers}, - status code: {response.status_code} {response.reason}""" - ) - - logger.info(f"Request: {request}") - _attach_allure_step(request, response.status_code) - - test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}")) - with open(test_file, "wb") as file: - for chunk in response.iter_content(chunk_size=8192): - file.write(chunk) - - return test_file - - -@reporter.step("Get via Zip HTTP Gate") -def get_via_zip_http_gate(cid: str, prefix: str, node: ClusterNode, timeout: Optional[int] = 300): - """ - This function gets given object from HTTP gate - cid: container id to get object from - prefix: common prefix - node: node to make request - """ - request = f"{node.http_gate.get_endpoint()}/zip/{cid}/{prefix}" - resp = requests.get(request, stream=True, timeout=timeout, verify=False) - - if not resp.ok: - raise Exception( - f"""Failed to get object via HTTP gate: - request: {resp.request.path_url}, - response: {resp.text}, - headers: {resp.headers}, - status code: {resp.status_code} {resp.reason}""" - ) - - logger.info(f"Request: {request}") - _attach_allure_step(request, resp.status_code) - - test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip")) - with open(test_file, "wb") as file: - shutil.copyfileobj(resp.raw, file) - - with zipfile.ZipFile(test_file, "r") as zip_ref: - zip_ref.extractall(ASSETS_DIR) - - return os.path.join(os.getcwd(), ASSETS_DIR, prefix) - - -@reporter.step("Get via HTTP Gate by attribute") -def get_via_http_gate_by_attribute( - cid: str, - attribute: dict, - node: ClusterNode, - request_path: Optional[str] = None, - timeout: Optional[int] = 300, -): - """ - This function gets given object from HTTP gate - cid: CID to get object from - attribute: attribute {name: attribute} value pair - endpoint: http gate endpoint - request_path: (optional) http request path, if ommited - use default [{endpoint}/get_by_attribute/{Key}/{Value}] - """ - - attr_name = list(attribute.keys())[0] - attr_value = quote_plus(str(attribute.get(attr_name))) - - request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}" - if request_path: - request = f"{node.http_gate.get_endpoint()}{request_path}" - - resp = requests.get(request, stream=True, timeout=timeout, verify=False) - - if not resp.ok: - raise Exception( - f"""Failed to get object via HTTP gate: - request: {resp.request.path_url}, - response: {resp.text}, - headers: {resp.headers}, - status code: {resp.status_code} {resp.reason}""" - ) - - logger.info(f"Request: {request}") - _attach_allure_step(request, resp.status_code) - - test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}")) - with open(test_file, "wb") as file: - shutil.copyfileobj(resp.raw, file) - return test_file - - -@reporter.step("Upload via HTTP Gate") -def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300) -> str: - """ - This function upload given object through HTTP gate - cid: CID to get object from - path: File path to upload - endpoint: http gate endpoint - headers: Object header - """ - request = f"{endpoint}/upload/{cid}" - files = {"upload_file": open(path, "rb")} - body = {"filename": path} - resp = requests.post(request, files=files, data=body, 
headers=headers, timeout=timeout, verify=False) - - if not resp.ok: - raise Exception( - f"""Failed to upload object via HTTP gate: - request: {resp.request.path_url}, - response: {resp.text}, - status code: {resp.status_code} {resp.reason}""" - ) - - logger.info(f"Request: {request}") - _attach_allure_step(request, resp.json(), req_type="POST") - - assert resp.json().get("object_id"), f"OID not found in response {resp}" - - return resp.json().get("object_id") - - -@reporter.step("Check if the passed object is large") -def is_object_large(filepath: str) -> bool: - """ - This function checks the passed file size and returns True if file_size > SIMPLE_OBJECT_SIZE - filepath: File path to check - """ - file_size = os.path.getsize(filepath) - logger.info(f"Size= {file_size}") - if file_size > int(SIMPLE_OBJECT_SIZE): - return True - else: - return False - - -@reporter.step("Upload via HTTP Gate using Curl") -def upload_via_http_gate_curl( - cid: str, - filepath: str, - endpoint: str, - headers: Optional[list] = None, - error_pattern: Optional[str] = None, -) -> str: - """ - This function uploads the given object through the HTTP gate using the curl utility. - cid: CID of the container to upload the object to - filepath: File path to upload - headers: Object header - endpoint: http gate endpoint - error_pattern: [optional] expected error message from the command - """ - request = f"{endpoint}/upload/{cid}" - attributes = "" - if headers: - # parse attributes - attributes = " ".join(headers) - - large_object = is_object_large(filepath) - if large_object: - # pre-clean - local_shell.exec("rm pipe -f") - files = f"file=@pipe;filename={os.path.basename(filepath)}" - cmd = f"mkfifo pipe;cat {filepath} > pipe & curl -k --no-buffer -F '{files}' {attributes} {request}" - output = local_shell.exec(cmd, command_options) - # clean up pipe - local_shell.exec("rm pipe") - else: - files = f"file=@{filepath};filename={os.path.basename(filepath)}" - cmd = f"curl -k -F '{files}' {attributes} {request}" - output = local_shell.exec(cmd) - - if error_pattern: - match = error_pattern.casefold() in str(output).casefold() - assert match, f"Expected {output} to match {error_pattern}" - return "" - - oid_re = re.search(r'"object_id": "(.*)"', output) - if not oid_re: - raise AssertionError(f'Could not find "object_id" in {output}') - return oid_re.group(1) - - -@retry(max_attempts=3, sleep_interval=1) -@reporter.step("Get via HTTP Gate using Curl") -def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> TestFile: - """ - This function gets the given object from the HTTP gate using the curl utility.
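For context, a hedged round-trip sketch for the HTTP gate helpers defined in this file; cid, file_path and the ClusterNode fixture node are assumed to be provided by the calling test:

    # Hypothetical sketch: upload a file through the HTTP gate and read it back, comparing hashes.
    oid = upload_via_http_gate(cid, file_path, endpoint=node.http_gate.get_endpoint())
    downloaded = get_via_http_gate(cid, oid, node)
    assert get_file_hash(downloaded) == get_file_hash(file_path), "Hashes must match after round trip"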
- cid: CID to get object from - oid: object OID - node: node for request - """ - request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" - test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")) - - curl = GenericCli("curl", node.host) - curl(f"-k ", f"{request} > {test_file}", shell=local_shell) - - return test_file - - -def _attach_allure_step(request: str, status_code: int, req_type="GET"): - command_attachment = f"REQUEST: '{request}'\n" f"RESPONSE:\n {status_code}\n" - with reporter.step(f"{req_type} Request"): - reporter.attach(command_attachment, f"{req_type} Request") - - -@reporter.step("Try to get object and expect error") -def try_to_get_object_and_expect_error( - cid: str, - oid: str, - node: ClusterNode, - error_pattern: str, -) -> None: - try: - get_via_http_gate(cid=cid, oid=oid, node=node) - raise AssertionError(f"Expected error on getting object with cid: {cid}") - except Exception as err: - match = error_pattern.casefold() in str(err).casefold() - assert match, f"Expected {err} to match {error_pattern}" - - -@reporter.step("Verify object can be get using HTTP header attribute") -def get_object_by_attr_and_verify_hashes( - oid: str, - file_name: str, - cid: str, - attrs: dict, - node: ClusterNode, -) -> None: - got_file_path_http = get_via_http_gate(cid=cid, oid=oid, node=node) - got_file_path_http_attr = get_via_http_gate_by_attribute(cid=cid, attribute=attrs, node=node) - assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr) - - -def verify_object_hash( - oid: str, - file_name: str, - wallet: str, - cid: str, - shell: Shell, - nodes: list[StorageNode], - request_node: ClusterNode, - object_getter=None, -) -> None: - - nodes_list = get_nodes_without_object( - wallet=wallet, - cid=cid, - oid=oid, - shell=shell, - nodes=nodes, - ) - # for some reason we can face with case when nodes_list is empty due to object resides in all nodes - if nodes_list: - random_node = random.choice(nodes_list) - else: - random_node = random.choice(nodes) - - object_getter = object_getter or get_via_http_gate - - got_file_path = get_object( - wallet=wallet, - cid=cid, - oid=oid, - shell=shell, - endpoint=random_node.get_rpc_endpoint(), - ) - got_file_path_http = object_getter(cid=cid, oid=oid, node=request_node) - - assert_hashes_are_equal(file_name, got_file_path, got_file_path_http) - - -def assert_hashes_are_equal(orig_file_name: str, got_file_1: str, got_file_2: str) -> None: - msg = "Expected hashes are equal for files {f1} and {f2}" - got_file_hash_http = get_file_hash(got_file_1) - assert get_file_hash(got_file_2) == got_file_hash_http, msg.format(f1=got_file_2, f2=got_file_1) - assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format(f1=orig_file_name, f2=got_file_1) - - -def attr_into_header(attrs: dict) -> dict: - return {f"X-Attribute-{_key}": _value for _key, _value in attrs.items()} - - -@reporter.step("Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: Value'") -def attr_into_str_header_curl(attrs: dict) -> list: - headers = [] - for k, v in attrs.items(): - headers.append(f"-H 'X-Attribute-{k}: {v}'") - logger.info(f"[List of Attrs for curl:] {headers}") - return headers - - -@reporter.step("Try to get object via http (pass http_request and optional attributes) and expect error") -def try_to_get_object_via_passed_request_and_expect_error( - cid: str, - oid: str, - node: ClusterNode, - error_pattern: str, - http_request_path: str, - attrs: Optional[dict] = None, -) -> 
None: - try: - if attrs is None: - get_via_http_gate(cid, oid, node, http_request_path) - else: - get_via_http_gate_by_attribute(cid, attrs, node, http_request_path) - raise AssertionError(f"Expected error on getting object with cid: {cid}") - except Exception as err: - match = error_pattern.casefold() in str(err).casefold() - assert match, f"Expected {err} to match {error_pattern}" diff --git a/src/frostfs_testlib/steps/metrics.py b/src/frostfs_testlib/steps/metrics.py deleted file mode 100644 index 0d0950a..0000000 --- a/src/frostfs_testlib/steps/metrics.py +++ /dev/null @@ -1,45 +0,0 @@ -import re - -from frostfs_testlib import reporter -from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.testing.test_control import wait_for_success - - -@reporter.step("Check metrics result") -@wait_for_success(max_wait_time=300, interval=10) -def check_metrics_counter( - cluster_nodes: list[ClusterNode], - operator: str = "==", - counter_exp: int = 0, - parse_from_command: bool = False, - **metrics_greps: str, -): - counter_act = 0 - for cluster_node in cluster_nodes: - counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps) - assert eval( - f"{counter_act} {operator} {counter_exp}" - ), f"Actual: {counter_act} {operator} Expected: {counter_exp} in nodes: {cluster_nodes}" - - -@reporter.step("Get metrics value from node: {node}") -def get_metrics_value(node: ClusterNode, parse_from_command: bool = False, **metrics_greps: str): - try: - command_result = node.metrics.storage.get_metrics_search_by_greps(**metrics_greps) - if parse_from_command: - metrics_counter = calc_metrics_count_from_stdout(command_result.stdout, **metrics_greps) - else: - metrics_counter = calc_metrics_count_from_stdout(command_result.stdout) - except RuntimeError as e: - metrics_counter = 0 - - return metrics_counter - - -@reporter.step("Parse metrics count and calc sum of result") -def calc_metrics_count_from_stdout(metric_result_stdout: str, command: str = None): - if command: - result = re.findall(rf"{command}\s*([\d.e+-]+)", metric_result_stdout) - else: - result = re.findall(r"}\s*([\d.e+-]+)", metric_result_stdout) - return sum(map(lambda x: int(float(x)), result)) diff --git a/src/frostfs_testlib/steps/network.py b/src/frostfs_testlib/steps/network.py deleted file mode 100644 index 6bde2f1..0000000 --- a/src/frostfs_testlib/steps/network.py +++ /dev/null @@ -1,21 +0,0 @@ -from frostfs_testlib.shell import CommandOptions -from frostfs_testlib.storage.cluster import ClusterNode - - -class IpHelper: - @staticmethod - def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[tuple]) -> None: - shell = node.host.get_shell() - for ip, table in block_ip: - if not table: - shell.exec(f"ip r a blackhole {ip}") - continue - shell.exec(f"ip r a blackhole {ip} table {table}") - - @staticmethod - def restore_input_traffic_to_node(node: ClusterNode) -> None: - shell = node.host.get_shell() - unlock_ip = shell.exec("ip r l table all | grep blackhole", CommandOptions(check=False)).stdout - - for active_blackhole in unlock_ip.strip().split("\n"): - shell.exec(f"ip r d {active_blackhole}") diff --git a/src/frostfs_testlib/steps/node_management.py b/src/frostfs_testlib/steps/node_management.py deleted file mode 100644 index 42b1fc5..0000000 --- a/src/frostfs_testlib/steps/node_management.py +++ /dev/null @@ -1,292 +0,0 @@ -import logging -import random -import re -import time -from dataclasses import dataclass -from time import sleep -from typing import Optional - -from 
frostfs_testlib import reporter -from frostfs_testlib.cli import FrostfsAdm, FrostfsCli -from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC -from frostfs_testlib.resources.common import MORPH_BLOCK_TIME -from frostfs_testlib.shell import Shell -from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align -from frostfs_testlib.storage.cluster import Cluster, StorageNode -from frostfs_testlib.testing.test_control import wait_for_success -from frostfs_testlib.utils import datetime_utils - -logger = logging.getLogger("NeoLogger") - - -@dataclass -class HealthStatus: - network_status: Optional[str] = None - health_status: Optional[str] = None - - @staticmethod - def from_stdout(output: str) -> "HealthStatus": - network, health = None, None - for line in output.split("\n"): - if "Network status" in line: - network = line.split(":")[-1].strip() - if "Health status" in line: - health = line.split(":")[-1].strip() - return HealthStatus(network, health) - - -@reporter.step("Get Locode from random storage node") -def get_locode_from_random_node(cluster: Cluster) -> str: - node = random.choice(cluster.services(StorageNode)) - locode = node.get_un_locode() - logger.info(f"Chosen '{locode}' locode from node {node}") - return locode - - -@reporter.step("Healthcheck for storage node {node}") -def storage_node_healthcheck(node: StorageNode) -> HealthStatus: - """ - The function returns storage node's health status. - Args: - node: storage node for which health status should be retrieved. - Returns: - health status as HealthStatus object. - """ - - host = node.host - service_config = host.get_service_config(node.name) - wallet_path = service_config.attributes["wallet_path"] - wallet_password = service_config.attributes["wallet_password"] - control_endpoint = service_config.attributes["control_endpoint"] - - shell = host.get_shell() - wallet_config_path = f"/tmp/{node.name}-config.yaml" - wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' - shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") - - cli_config = host.get_cli_config("frostfs-cli") - - cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) - result = cli.control.healthcheck(control_endpoint) - - return HealthStatus.from_stdout(result.stdout) - - -@reporter.step("Set status for {node}") -def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> None: - """ - The function sets particular status for given node. - Args: - node: node for which status should be set. - status: online or offline. - retries (optional, int): number of retry attempts if it didn't work from the first time - """ - host = node.host - service_config = host.get_service_config(node.name) - wallet_path = service_config.attributes["wallet_path"] - wallet_password = service_config.attributes["wallet_password"] - control_endpoint = service_config.attributes["control_endpoint"] - - shell = host.get_shell() - wallet_config_path = f"/tmp/{node.name}-config.yaml" - wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' - shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") - - cli_config = host.get_cli_config("frostfs-cli") - - cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) - cli.control.set_status(control_endpoint, status) - - -@reporter.step("Get netmap snapshot") -def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str: - """ - The function returns string representation of netmap snapshot. 
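A minimal sketch of how the healthcheck and status helpers above could be combined, assuming node is a StorageNode fixture:

    # Hypothetical sketch: make sure the node reports READY before switching it offline.
    status = storage_node_healthcheck(node)
    assert status.health_status == "READY", f"Unexpected node health: {status}"
    storage_node_set_status(node, status="offline")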
- Args: - node: node from which netmap snapshot should be requested. - Returns: - string representation of netmap - """ - - storage_wallet_config = node.get_wallet_config_path() - storage_wallet_path = node.get_wallet_path() - - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, config_file=storage_wallet_config) - return cli.netmap.snapshot(rpc_endpoint=node.get_rpc_endpoint(), wallet=storage_wallet_path).stdout - - -@reporter.step("Get shard list for {node}") -def node_shard_list(node: StorageNode, json: Optional[bool] = None) -> list[str]: - """ - The function returns list of shards for specified storage node. - Args: - node: node for which shards should be returned. - Returns: - list of shards. - """ - host = node.host - service_config = host.get_service_config(node.name) - wallet_path = service_config.attributes["wallet_path"] - wallet_password = service_config.attributes["wallet_password"] - control_endpoint = service_config.attributes["control_endpoint"] - - shell = host.get_shell() - wallet_config_path = f"/tmp/{node.name}-config.yaml" - wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' - shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") - - cli_config = host.get_cli_config("frostfs-cli") - - cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) - result = cli.shards.list(endpoint=control_endpoint, json_mode=json) - - return re.findall(r"Shard (.*):", result.stdout) - - -@reporter.step("Shard set for {node}") -def node_shard_set_mode(node: StorageNode, shard: list[str], mode: str) -> None: - """ - The function sets mode for specified shard. - Args: - node: node on which shard mode should be set. - """ - host = node.host - service_config = host.get_service_config(node.name) - wallet_path = service_config.attributes["wallet_path"] - wallet_password = service_config.attributes["wallet_password"] - control_endpoint = service_config.attributes["control_endpoint"] - - shell = host.get_shell() - wallet_config_path = f"/tmp/{node.name}-config.yaml" - wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' - shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") - - cli_config = host.get_cli_config("frostfs-cli") - - cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) - cli.shards.set_mode(endpoint=control_endpoint, mode=mode, id=shard) - - -@reporter.step("Drop object from {node}") -def drop_object(node: StorageNode, cid: str, oid: str) -> None: - """ - The function drops object from specified node. - Args: - node: node from which object should be dropped. 
- """ - host = node.host - service_config = host.get_service_config(node.name) - wallet_path = service_config.attributes["wallet_path"] - wallet_password = service_config.attributes["wallet_password"] - control_endpoint = service_config.attributes["control_endpoint"] - - shell = host.get_shell() - wallet_config_path = f"/tmp/{node.name}-config.yaml" - wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' - shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") - - cli_config = host.get_cli_config("frostfs-cli") - - cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) - objects = f"{cid}/{oid}" - cli.control.drop_objects(control_endpoint, objects) - - -@reporter.step("Delete data from host for node {node}") -def delete_node_data(node: StorageNode) -> None: - node.stop_service() - node.host.delete_storage_node_data(node.name) - time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) - - -@reporter.step("Exclude node {node_to_exclude} from network map") -def exclude_node_from_network_map(node_to_exclude: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None: - node_netmap_key = node_to_exclude.get_wallet_public_key() - - storage_node_set_status(node_to_exclude, status="offline") - - time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) - tick_epoch(shell, cluster) - wait_for_epochs_align(shell, cluster) - - snapshot = get_netmap_snapshot(node=alive_node, shell=shell) - assert node_netmap_key not in snapshot, f"Expected node with key {node_netmap_key} to be absent in network map" - - -@reporter.step("Include node {node_to_include} into network map") -def include_node_to_network_map(node_to_include: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None: - storage_node_set_status(node_to_include, status="online") - - # Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch. - # First sleep can be omitted after https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/60 complete. 
- - time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2) - tick_epoch(shell, cluster) - time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2) - - await_node_in_map(node_to_include, shell, alive_node) - - -@reporter.step("Check node {node} in network map") -def check_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: - alive_node = alive_node or node - - node_netmap_key = node.get_wallet_public_key() - logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}") - - snapshot = get_netmap_snapshot(alive_node, shell) - assert node_netmap_key in snapshot, f"Expected node with key {node_netmap_key} to be in network map" - - -@wait_for_success(300, 15, title="Await node {node} in network map") -def await_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: - check_node_in_map(node, shell, alive_node) - - -@reporter.step("Check node {node} NOT in network map") -def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: - alive_node = alive_node or node - - node_netmap_key = node.get_wallet_public_key() - logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}") - - snapshot = get_netmap_snapshot(alive_node, shell) - assert node_netmap_key not in snapshot, f"Expected node with key {node_netmap_key} to be NOT in network map" - - -@reporter.step("Wait for node {node} is ready") -def wait_for_node_to_be_ready(node: StorageNode) -> None: - timeout, attempts = 60, 15 - for _ in range(attempts): - try: - health_check = storage_node_healthcheck(node) - if health_check.health_status == "READY": - return - except Exception as err: - logger.warning(f"Node {node} is not ready:\n{err}") - sleep(timeout) - raise AssertionError(f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds") - - -@reporter.step("Remove nodes from network map trough cli-adm morph command") -def remove_nodes_from_map_morph(shell: Shell, cluster: Cluster, remove_nodes: list[StorageNode], alive_node: Optional[StorageNode] = None): - """ - Move node to the Offline state in the candidates list and tick an epoch to update the netmap - using frostfs-adm - Args: - shell: local shell to make queries about current epoch. 
Remote shell will be used to tick new one - cluster: cluster instance under test - alive_node: node to send requests to (first node in cluster by default) - remove_nodes: list of nodes which would be removed from map - """ - - alive_node = alive_node if alive_node else remove_nodes[0] - remote_shell = alive_node.host.get_shell() - - node_netmap_keys = list(map(StorageNode.get_wallet_public_key, remove_nodes)) - logger.info(f"Nodes netmap keys are: {' '.join(node_netmap_keys)}") - - if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH: - # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests) - frostfsadm = FrostfsAdm(shell=remote_shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH) - frostfsadm.morph.remove_nodes(node_netmap_keys) diff --git a/src/frostfs_testlib/steps/payment_neogo.py b/src/frostfs_testlib/steps/payment_neogo.py deleted file mode 100644 index 8e78cca..0000000 --- a/src/frostfs_testlib/steps/payment_neogo.py +++ /dev/null @@ -1,147 +0,0 @@ -import base64 -import json -import logging -import re -import time -from typing import Optional - -from neo3.wallet import utils as neo3_utils -from neo3.wallet import wallet as neo3_wallet - -from frostfs_testlib import reporter -from frostfs_testlib.cli import NeoGo -from frostfs_testlib.resources.cli import NEOGO_EXECUTABLE -from frostfs_testlib.resources.common import FROSTFS_CONTRACT, GAS_HASH, MORPH_BLOCK_TIME -from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain -from frostfs_testlib.utils import converting_utils, datetime_utils, wallet_utils - -logger = logging.getLogger("NeoLogger") - -EMPTY_PASSWORD = "" -TX_PERSIST_TIMEOUT = 15 # seconds -ASSET_POWER_SIDECHAIN = 10**12 - - -def get_nns_contract_hash(morph_chain: MorphChain) -> str: - return morph_chain.rpc_client.get_contract_state(1)["hash"] - - -def get_contract_hash(morph_chain: MorphChain, resolve_name: str, shell: Shell) -> str: - nns_contract_hash = get_nns_contract_hash(morph_chain) - neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE) - out = neogo.contract.testinvokefunction( - scripthash=nns_contract_hash, - method="resolve", - arguments=f"string:{resolve_name} int:16", - rpc_endpoint=morph_chain.get_endpoint(), - ) - stack_data = json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"] - return bytes.decode(base64.b64decode(stack_data[0]["value"])) - - -def transaction_accepted(morph_chain: MorphChain, tx_id: str): - """ - This function returns True in case of accepted TX. - Args: - tx_id(str): transaction ID - Returns: - (bool) - """ - - try: - for _ in range(0, TX_PERSIST_TIMEOUT): - time.sleep(1) - neogo = NeoGo(shell=morph_chain.host.get_shell(), neo_go_exec_path=NEOGO_EXECUTABLE) - resp = neogo.query.tx(tx_hash=tx_id, rpc_endpoint=morph_chain.get_endpoint()) - if resp is not None: - logger.info(f"TX is accepted in block: {resp}") - return True, resp - except Exception as out: - logger.info(f"request failed with error: {out}") - raise out - return False - - -@reporter.step("Get FrostFS Balance") -def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_password: str = ""): - """ - This function returns FrostFS balance for given wallet. 
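A short sketch of the morph-chain helpers above, assuming morph_chain, shell and a known transaction id tx_id are available; "netmap.frostfs" is the NNS name already used by tick_epoch:

    # Hypothetical sketch: resolve the netmap contract hash via NNS and confirm a transaction persisted.
    netmap_hash = get_contract_hash(morph_chain, "netmap.frostfs", shell=shell)
    assert transaction_accepted(morph_chain, tx_id), f"TX {tx_id} was not accepted by the morph chain"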
- """ - with open(wallet_path) as wallet_file: - wallet = neo3_wallet.Wallet.from_json(json.load(wallet_file), password=wallet_password) - acc = wallet.accounts[-1] - payload = [{"type": "Hash160", "value": str(acc.script_hash)}] - try: - resp = morph_chain.rpc_client.invoke_function( - get_contract_hash(morph_chain, "balance.frostfs", shell=shell), "balanceOf", payload - ) - logger.info(f"Got response \n{resp}") - value = int(resp["stack"][0]["value"]) - return value / ASSET_POWER_SIDECHAIN - except Exception as out: - logger.error(f"failed to get wallet balance: {out}") - raise out - - -@reporter.step("Transfer Gas") -def transfer_gas( - shell: Shell, - amount: int, - morph_chain: MorphChain, - wallet_from_path: Optional[str] = None, - wallet_from_password: Optional[str] = None, - address_from: Optional[str] = None, - address_to: Optional[str] = None, - wallet_to_path: Optional[str] = None, - wallet_to_password: Optional[str] = None, -): - """ - This function transfer GAS in main chain from mainnet wallet to - the provided wallet. If the wallet contains more than one address, - the assets will be transferred to the last one. - Args: - shell: Shell instance. - wallet_from_password: Password of the wallet; it is required to decode the wallet - and extract its addresses. - wallet_from_path: Path to chain node wallet. - address_from: The address of the wallet to transfer assets from. - wallet_to_path: The path to the wallet to transfer assets to. - wallet_to_password: The password to the wallet to transfer assets to. - address_to: The address of the wallet to transfer assets to. - amount: Amount of gas to transfer. - """ - wallet_from_path = wallet_from_path or morph_chain.get_wallet_path() - wallet_from_password = ( - wallet_from_password if wallet_from_password is not None else morph_chain.get_wallet_password() - ) - address_from = address_from or wallet_utils.get_last_address_from_wallet(wallet_from_path, wallet_from_password) - address_to = address_to or wallet_utils.get_last_address_from_wallet(wallet_to_path, wallet_to_password) - - neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) - out = neogo.nep17.transfer( - rpc_endpoint=morph_chain.get_endpoint(), - wallet=wallet_from_path, - wallet_password=wallet_from_password, - amount=amount, - from_address=address_from, - to_address=address_to, - token="GAS", - force=True, - ) - txid = out.stdout.strip().split("\n")[-1] - if len(txid) != 64: - raise Exception("Got no TXID after run the command") - if not transaction_accepted(morph_chain, txid): - raise AssertionError(f"TX {txid} hasn't been processed") - time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) - - -@reporter.step("Get Sidechain Balance") -def get_sidechain_balance(morph_chain: MorphChain, address: str): - resp = morph_chain.rpc_client.get_nep17_balances(address=address) - logger.info(f"Got getnep17balances response: {resp}") - for balance in resp["balance"]: - if balance["assethash"] == GAS_HASH: - return float(balance["amount"]) / ASSET_POWER_SIDECHAIN - return float(0) diff --git a/src/frostfs_testlib/steps/s3_helper.py b/src/frostfs_testlib/steps/s3_helper.py deleted file mode 100644 index c3092df..0000000 --- a/src/frostfs_testlib/steps/s3_helper.py +++ /dev/null @@ -1,209 +0,0 @@ -import logging -import os -from datetime import datetime, timedelta -from typing import Optional - -from dateutil.parser import parse - -from frostfs_testlib import reporter -from frostfs_testlib.clients.s3 import BucketContainerResolver, S3ClientWrapper, VersioningStatus -from 
frostfs_testlib.shell import Shell -from frostfs_testlib.steps.cli.container import search_nodes_with_container -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo - -logger = logging.getLogger("NeoLogger") - - -@reporter.step("Expected all objects are presented in the bucket") -def check_objects_in_bucket( - s3_client: S3ClientWrapper, - bucket: str, - expected_objects: list, - unexpected_objects: Optional[list] = None, -) -> None: - unexpected_objects = unexpected_objects or [] - bucket_objects = s3_client.list_objects(bucket) - assert len(bucket_objects) == len(expected_objects), f"Expected {len(expected_objects)} objects in the bucket" - for bucket_object in expected_objects: - assert bucket_object in bucket_objects, f"Expected object {bucket_object} in objects list {bucket_objects}" - - for bucket_object in unexpected_objects: - assert bucket_object not in bucket_objects, f"Expected object {bucket_object} not in objects list {bucket_objects}" - - -@reporter.step("Try to get object and got error") -def try_to_get_objects_and_expect_error(s3_client: S3ClientWrapper, bucket: str, object_keys: list) -> None: - for obj in object_keys: - try: - s3_client.get_object(bucket, obj) - raise AssertionError(f"Object {obj} found in bucket {bucket}") - except Exception as err: - assert "The specified key does not exist" in str(err), f"Expected error in exception {err}" - - -@reporter.step("Set versioning status to '{status}' for bucket '{bucket}'") -def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: VersioningStatus): - if status == VersioningStatus.UNDEFINED: - return - - s3_client.put_bucket_versioning(bucket, status=status) - bucket_status = s3_client.get_bucket_versioning_status(bucket) - assert bucket_status == status.value, f"Expected {bucket_status} status. 
Got {status.value}" - - -def object_key_from_file_path(full_path: str) -> str: - return os.path.basename(full_path) - - -def assert_tags(actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None) -> None: - expected_tags = [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else [] - unexpected_tags = [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else [] - if expected_tags == []: - assert not actual_tags, f"Expected there is no tags, got {actual_tags}" - assert len(expected_tags) == len(actual_tags) - for tag in expected_tags: - assert tag in actual_tags, f"Tag {tag} must be in {actual_tags}" - for tag in unexpected_tags: - assert tag not in actual_tags, f"Tag {tag} should not be in {actual_tags}" - - -@reporter.step("Expected all tags are presented in object") -def check_tags_by_object( - s3_client: S3ClientWrapper, - bucket: str, - key: str, - expected_tags: list, - unexpected_tags: Optional[list] = None, -) -> None: - actual_tags = s3_client.get_object_tagging(bucket, key) - assert_tags(expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags) - - -@reporter.step("Expected all tags are presented in bucket") -def check_tags_by_bucket( - s3_client: S3ClientWrapper, - bucket: str, - expected_tags: list, - unexpected_tags: Optional[list] = None, -) -> None: - actual_tags = s3_client.get_bucket_tagging(bucket) - assert_tags(expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags) - - -def assert_object_lock_mode( - s3_client: S3ClientWrapper, - bucket: str, - file_name: str, - object_lock_mode: str, - retain_until_date: datetime, - legal_hold_status: str = "OFF", - retain_period: Optional[int] = None, -): - object_dict = s3_client.get_object(bucket, file_name, full_output=True) - assert object_dict.get("ObjectLockMode") == object_lock_mode, f"Expected Object Lock Mode is {object_lock_mode}" - assert ( - object_dict.get("ObjectLockLegalHoldStatus") == legal_hold_status - ), f"Expected Object Lock Legal Hold Status is {legal_hold_status}" - object_retain_date = object_dict.get("ObjectLockRetainUntilDate") - retain_date = parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date - if retain_until_date: - assert retain_date.strftime("%Y-%m-%dT%H:%M:%S") == retain_until_date.strftime( - "%Y-%m-%dT%H:%M:%S" - ), f'Expected Object Lock Retain Until Date is {str(retain_until_date.strftime("%Y-%m-%dT%H:%M:%S"))}' - elif retain_period: - last_modify_date = object_dict.get("LastModified") - last_modify = parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date - assert ( - retain_date - last_modify + timedelta(seconds=1) - ).days == retain_period, f"Expected retention period is {retain_period} days" - - -def _format_grants_as_strings(grants: list[dict]) -> list: - grantee_format = "{g_type}::{uri}:{permission}" - return set( - [ - grantee_format.format( - g_type=grant.get("Grantee", {}).get("Type", ""), - uri=grant.get("Grantee", {}).get("URI", ""), - permission=grant.get("Permission", ""), - ) - for grant in grants - ] - ) - - -@reporter.step("Verify ACL permissions") -def verify_acl_permissions(actual_acl_grants: list[dict], expected_acl_grants: list[dict], strict: bool = True): - actual_grants = _format_grants_as_strings(actual_acl_grants) - expected_grants = _format_grants_as_strings(expected_acl_grants) - - assert expected_grants <= actual_grants, "Permissions mismatch" - if 
strict: - assert expected_grants == actual_grants, "Extra permissions found, must not be there" - - -@reporter.step("Delete bucket with all objects") -def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str): - versioning_status = s3_client.get_bucket_versioning_status(bucket) - if versioning_status == VersioningStatus.ENABLED.value: - # From versioned bucket we should delete all versions and delete markers of all objects - objects_versions = s3_client.list_objects_versions(bucket) - if objects_versions: - s3_client.delete_object_versions_without_dm(bucket, objects_versions) - objects_delete_markers = s3_client.list_delete_markers(bucket) - if objects_delete_markers: - s3_client.delete_object_versions_without_dm(bucket, objects_delete_markers) - - else: - # From non-versioned bucket it's sufficient to delete objects by key - objects = s3_client.list_objects(bucket) - if objects: - s3_client.delete_objects(bucket, objects) - objects_delete_markers = s3_client.list_delete_markers(bucket) - if objects_delete_markers: - s3_client.delete_object_versions_without_dm(bucket, objects_delete_markers) - - # Delete the bucket itself - s3_client.delete_bucket(bucket) - - -@reporter.step("Search nodes bucket") -def search_nodes_with_bucket( - cluster: Cluster, - bucket_name: str, - wallet: WalletInfo, - shell: Shell, - endpoint: str, - bucket_container_resolver: BucketContainerResolver, -) -> list[ClusterNode]: - cid = None - for cluster_node in cluster.cluster_nodes: - cid = bucket_container_resolver.resolve(cluster_node, bucket_name) - if cid: - break - nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster) - return nodes_list - - -def get_bytes_relative_to_object(value: int | str, object_size: int = None, part_size: int = None) -> int: - if isinstance(value, int): - return value - - if "part" not in value and "object" not in value: - return int(value) - - if object_size is not None: - value = value.replace("object", str(object_size)) - - if part_size is not None: - value = value.replace("part", str(part_size)) - - return int(eval(value)) - - -def get_range_relative_to_object(rng: str, object_size: int = None, part_size: int = None, int_values: bool = False) -> str | int: - start, end = rng.split(":") - start = get_bytes_relative_to_object(start, object_size, part_size) - end = get_bytes_relative_to_object(end, object_size, part_size) - return (start, end) if int_values else f"bytes {start}-{end}/*" diff --git a/src/frostfs_testlib/steps/session_token.py b/src/frostfs_testlib/steps/session_token.py deleted file mode 100644 index 67c556d..0000000 --- a/src/frostfs_testlib/steps/session_token.py +++ /dev/null @@ -1,274 +0,0 @@ -import base64 -import json -import logging -import os -import uuid -from dataclasses import dataclass -from typing import Any, Optional - -from frostfs_testlib import reporter -from frostfs_testlib.cli import FrostfsCli -from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC -from frostfs_testlib.resources.common import ASSETS_DIR -from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo -from frostfs_testlib.testing.readable import HumanReadableEnum -from frostfs_testlib.utils import json_utils, wallet_utils - -logger = logging.getLogger("NeoLogger") - -UNRELATED_KEY = "unrelated key in the session" -UNRELATED_OBJECT = "unrelated object in the session" 
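A worked example for the range helper above ("object" is substituted with the object size before evaluation); the 1024-byte size is only illustrative:

    # Worked example: a relative range spec resolved against a 1 KiB object.
    rng = get_range_relative_to_object("0:object", object_size=1024, int_values=True)
    assert rng == (0, 1024)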
-UNRELATED_CONTAINER = "unrelated container in the session" -WRONG_VERB = "wrong verb of the session" -INVALID_SIGNATURE = "invalid signature of the session data" - - -class ObjectVerb(HumanReadableEnum): - PUT = "PUT" - DELETE = "DELETE" - GET = "GET" - RANGEHASH = "RANGEHASH" - RANGE = "RANGE" - HEAD = "HEAD" - SEARCH = "SEARCH" - - -class ContainerVerb(HumanReadableEnum): - CREATE = "PUT" - DELETE = "DELETE" - SETEACL = "SETEACL" - - -@dataclass -class Lifetime: - exp: int = 100000000 - nbf: int = 0 - iat: int = 0 - - -@reporter.step("Generate Session Token") -def generate_session_token( - owner_wallet: WalletInfo, - session_wallet: WalletInfo, - session: dict[str, dict[str, Any]], - tokens_dir: str, - lifetime: Optional[Lifetime] = None, -) -> str: - """ - This function generates session token and writes it to the file. - Args: - owner_wallet: wallet of container owner - session_wallet: wallet to which we grant the access via session token - session: Contains allowed operation with parameters - tokens_dir: Dir for token - lifetime: lifetime options for session - Returns: - The path to the generated session token file - """ - - file_path = os.path.join(tokens_dir, str(uuid.uuid4())) - - pub_key_64 = wallet_utils.get_wallet_public_key(session_wallet.path, session_wallet.password, "base64") - - lifetime = lifetime or Lifetime() - - session_token = { - "body": { - "id": f"{base64.b64encode(uuid.uuid4().bytes).decode('utf-8')}", - "ownerID": {"value": f"{json_utils.encode_for_json(owner_wallet.get_address())}"}, - "lifetime": { - "exp": f"{lifetime.exp}", - "nbf": f"{lifetime.nbf}", - "iat": f"{lifetime.iat}", - }, - "sessionKey": pub_key_64, - } - } - session_token["body"].update(session) - - logger.info(f"Got this Session Token: {session_token}") - with open(file_path, "w", encoding="utf-8") as session_token_file: - json.dump(session_token, session_token_file, ensure_ascii=False, indent=4) - - return file_path - - -@reporter.step("Generate Session Token For Container") -def generate_container_session_token( - owner_wallet: WalletInfo, - session_wallet: WalletInfo, - verb: ContainerVerb, - tokens_dir: str, - lifetime: Optional[Lifetime] = None, - cid: Optional[str] = None, -) -> str: - """ - This function generates session token for ContainerSessionContext - and writes it to the file. It is able to prepare session token file - for a specific container () or for every container (adds - "wildcard" field). - Args: - owner_wallet: wallet of container owner. - session_wallet: wallet to which we grant the access via session token. - verb: verb to grant access to. - lifetime: lifetime options for session. - cid: container ID of the container - Returns: - The path to the generated session token file - """ - session = { - "container": { - "verb": verb.value, - "wildcard": cid is None, - **({"containerID": {"value": f"{json_utils.encode_for_json(cid)}"}} if cid is not None else {}), - }, - } - - return generate_session_token( - owner_wallet=owner_wallet, - session_wallet=session_wallet, - session=session, - tokens_dir=tokens_dir, - lifetime=lifetime, - ) - - -@reporter.step("Generate Session Token For Object") -def generate_object_session_token( - owner_wallet: WalletInfo, - session_wallet: WalletInfo, - oids: list[str], - cid: str, - verb: ObjectVerb, - tokens_dir: str, - lifetime: Optional[Lifetime] = None, -) -> str: - """ - This function generates session token for ObjectSessionContext - and writes it to the file. 
- Args: - owner_wallet: wallet of container owner - session_wallet: wallet to which we grant the access via session token - cid: container ID of the container - oids: list of objectIDs to put into session - verb: verb to grant access to; Valid verbs are: ObjectVerb. - lifetime: lifetime options for session - Returns: - The path to the generated session token file - """ - session = { - "object": { - "verb": verb.value, - "target": { - "container": {"value": json_utils.encode_for_json(cid)}, - "objects": [{"value": json_utils.encode_for_json(oid)} for oid in oids], - }, - }, - } - - return generate_session_token( - owner_wallet=owner_wallet, - session_wallet=session_wallet, - session=session, - tokens_dir=tokens_dir, - lifetime=lifetime, - ) - - -@reporter.step("Get signed token for container session") -def get_container_signed_token( - owner_wallet: WalletInfo, - user_wallet: WalletInfo, - verb: ContainerVerb, - shell: Shell, - tokens_dir: str, - lifetime: Optional[Lifetime] = None, -) -> str: - """ - Returns signed token file path for static container session - """ - session_token_file = generate_container_session_token( - owner_wallet=owner_wallet, - session_wallet=user_wallet, - verb=verb, - tokens_dir=tokens_dir, - lifetime=lifetime, - ) - return sign_session_token(shell, session_token_file, owner_wallet) - - -@reporter.step("Get signed token for object session") -def get_object_signed_token( - owner_wallet: WalletInfo, - user_wallet: WalletInfo, - cid: str, - storage_objects: list[StorageObjectInfo], - verb: ObjectVerb, - shell: Shell, - tokens_dir: str, - lifetime: Optional[Lifetime] = None, -) -> str: - """ - Returns signed token file path for static object session - """ - storage_object_ids = [storage_object.oid for storage_object in storage_objects] - session_token_file = generate_object_session_token( - owner_wallet=owner_wallet, - session_wallet=user_wallet, - oids=storage_object_ids, - cid=cid, - verb=verb, - tokens_dir=tokens_dir, - lifetime=lifetime, - ) - return sign_session_token(shell, session_token_file, owner_wallet) - - -@reporter.step("Create Session Token") -def create_session_token( - shell: Shell, - owner: str, - wallet: WalletInfo, - rpc_endpoint: str, -) -> str: - """ - Create session token for an object. - Args: - shell: Shell instance. - owner: User that writes the token. - wallet_path: The path to wallet to which we grant the access via session token. - wallet_password: Wallet password. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - Returns: - The path to the generated session token file. - """ - session_token = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) - frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - frostfscli.session.create( - rpc_endpoint=rpc_endpoint, - address=owner, - out=session_token, - wallet=wallet.path, - ) - return session_token - - -@reporter.step("Sign Session Token") -def sign_session_token(shell: Shell, session_token_file: str, wallet: WalletInfo) -> str: - """ - This function signs the session token by the given wallet. - - Args: - shell: Shell instance. - session_token_file: The path to the session token file. - wlt: The path to the signing wallet. - - Returns: - The path to the signed token. 
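A minimal sketch of issuing a signed static session token with the helpers above, assuming owner_wallet, user_wallet, shell and tokens_dir fixtures:

    # Hypothetical sketch: allow user_wallet to delete containers on behalf of owner_wallet.
    token_path = get_container_signed_token(owner_wallet, user_wallet, ContainerVerb.DELETE, shell, tokens_dir)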
- """ - signed_token_file = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) - frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - frostfscli.util.sign_session_token(session_token_file, signed_token_file) - return signed_token_file diff --git a/src/frostfs_testlib/steps/storage_object.py b/src/frostfs_testlib/steps/storage_object.py deleted file mode 100644 index 4b4b2a6..0000000 --- a/src/frostfs_testlib/steps/storage_object.py +++ /dev/null @@ -1,60 +0,0 @@ -import logging -from time import sleep - -import pytest - -from frostfs_testlib import reporter -from frostfs_testlib.resources.error_patterns import OBJECT_ALREADY_REMOVED -from frostfs_testlib.shell import Shell -from frostfs_testlib.steps.cli.object import delete_object, get_object -from frostfs_testlib.steps.epoch import tick_epoch -from frostfs_testlib.steps.tombstone import verify_head_tombstone -from frostfs_testlib.storage.cluster import Cluster -from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo - -logger = logging.getLogger("NeoLogger") - -CLEANUP_TIMEOUT = 10 - - -@reporter.step("Delete Objects") -def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster) -> None: - """ - Deletes given storage objects. - - Args: - storage_objects: list of objects to delete - shell: executor for cli command - """ - - with reporter.step("Delete objects"): - for storage_object in storage_objects: - storage_object.tombstone = delete_object( - storage_object.wallet, - storage_object.cid, - storage_object.oid, - shell=shell, - endpoint=cluster.default_rpc_endpoint, - ) - verify_head_tombstone( - wallet=storage_object.wallet, - cid=storage_object.cid, - oid_ts=storage_object.tombstone, - oid=storage_object.oid, - shell=shell, - endpoint=cluster.default_rpc_endpoint, - ) - - tick_epoch(shell, cluster) - sleep(CLEANUP_TIMEOUT) - - with reporter.step("Get objects and check errors"): - for storage_object in storage_objects: - with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED): - get_object( - storage_object.wallet, - storage_object.cid, - storage_object.oid, - shell=shell, - endpoint=cluster.default_rpc_endpoint, - ) diff --git a/src/frostfs_testlib/steps/storage_policy.py b/src/frostfs_testlib/steps/storage_policy.py deleted file mode 100644 index acc113f..0000000 --- a/src/frostfs_testlib/steps/storage_policy.py +++ /dev/null @@ -1,158 +0,0 @@ -#!/usr/bin/python3 - -""" - This module contains keywords which are used for asserting - that storage policies are respected. -""" -import logging - -from frostfs_testlib import reporter -from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND -from frostfs_testlib.shell import Shell -from frostfs_testlib.steps.cli.object import head_object -from frostfs_testlib.steps.complex_object_actions import get_last_object -from frostfs_testlib.storage.cluster import StorageNode -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo -from frostfs_testlib.utils import string_utils - -logger = logging.getLogger("NeoLogger") - - -# TODO: Unused, remove or make use of -@reporter.step("Get Object Copies") -def get_object_copies(complexity: str, wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: - """ - The function performs requests to all nodes of the container and - finds out if they store a copy of the object. The procedure is - different for simple and complex object, so the function requires - a sign of object complexity. 
- Args: - complexity (str): the tag of object size and complexity, - [Simple|Complex] - wallet (str): the path to the wallet on whose behalf the - copies are got - cid (str): ID of the container - oid (str): ID of the Object - shell: executor for cli command - Returns: - (int): the number of object copies in the container - """ - return ( - get_simple_object_copies(wallet, cid, oid, shell, nodes) - if complexity == "Simple" - else get_complex_object_copies(wallet, cid, oid, shell, nodes) - ) - - -@reporter.step("Get Simple Object Copies") -def get_simple_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: - """ - To figure out the number of a simple object copies, only direct - HEAD requests should be made to the every node of the container. - We consider non-empty HEAD response as a stored object copy. - Args: - wallet (str): the path to the wallet on whose behalf the - copies are got - cid (str): ID of the container - oid (str): ID of the Object - shell: executor for cli command - nodes: nodes to search on - Returns: - (int): the number of object copies in the container - """ - copies = 0 - for node in nodes: - try: - response = head_object(wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True) - if response: - logger.info(f"Found object {oid} on node {node}") - copies += 1 - except Exception: - logger.info(f"No {oid} object copy found on {node}, continue") - continue - return copies - - -@reporter.step("Get Complex Object Copies") -def get_complex_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: - """ - To figure out the number of a complex object copies, we firstly - need to retrieve its Last object. We consider that the number of - complex object copies is equal to the number of its last object - copies. When we have the Last object ID, the task is reduced - to getting simple object copies. - Args: - wallet (str): the path to the wallet on whose behalf the - copies are got - cid (str): ID of the container - oid (str): ID of the Object - shell: executor for cli command - Returns: - (int): the number of object copies in the container - """ - last_oid = get_last_object(wallet, cid, oid, shell, nodes) - assert last_oid, f"No Last Object for {cid}/{oid} found among all Storage Nodes" - return get_simple_object_copies(wallet, cid, last_oid, shell, nodes) - - -@reporter.step("Get Nodes With Object") -def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> list[StorageNode]: - """ - The function returns list of nodes which store - the given object. - Args: - cid (str): ID of the container which store the object - oid (str): object ID - shell: executor for cli command - nodes: nodes to find on - Returns: - (list): nodes which store the object - """ - - nodes_list = [] - for node in nodes: - wallet = WalletInfo.from_node(node) - try: - res = head_object( - wallet, - cid, - oid, - shell=shell, - endpoint=node.get_rpc_endpoint(), - is_direct=True, - ) - if res is not None: - logger.info(f"Found object {oid} on node {node}") - nodes_list.append(node) - except Exception: - logger.info(f"No {oid} object copy found on {node}, continue") - continue - return nodes_list - - -@reporter.step("Get Nodes Without Object") -def get_nodes_without_object(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> list[StorageNode]: - """ - The function returns list of nodes which do not store - the given object. 
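A hedged sketch of the copy-counting helper above; the REP 2 expectation is only an assumed placement policy, and wallet, cid, oid, shell and nodes come from fixtures:

    # Hypothetical sketch: verify a simple object is replicated according to an assumed REP 2 policy.
    copies = get_simple_object_copies(wallet, cid, oid, shell, nodes)
    assert copies == 2, f"Expected 2 copies of {oid}, got {copies}"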
- Args: - wallet (str): the path to the wallet on whose behalf - we request the nodes - cid (str): ID of the container which store the object - oid (str): object ID - shell: executor for cli command - Returns: - (list): nodes which do not store the object - """ - nodes_list = [] - for node in nodes: - try: - res = head_object(wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True) - if res is None: - nodes_list.append(node) - except Exception as err: - if string_utils.is_str_match_pattern(err, OBJECT_NOT_FOUND): - nodes_list.append(node) - else: - raise Exception(f"Got error {err} on head object command") from err - return nodes_list diff --git a/src/frostfs_testlib/steps/tombstone.py b/src/frostfs_testlib/steps/tombstone.py deleted file mode 100644 index 27f75d5..0000000 --- a/src/frostfs_testlib/steps/tombstone.py +++ /dev/null @@ -1,24 +0,0 @@ -import logging - -from frostfs_testlib import reporter -from frostfs_testlib.shell import Shell -from frostfs_testlib.steps.cli.object import head_object -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo - -logger = logging.getLogger("NeoLogger") - - -@reporter.step("Verify Head Tombstone") -def verify_head_tombstone(wallet: WalletInfo, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str): - header = head_object(wallet, cid, oid_ts, shell=shell, endpoint=endpoint)["header"] - - s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"] - logger.info(f"Header Session OIDs is {s_oid}") - logger.info(f"OID is {oid}") - - assert header["containerID"] == cid, "Tombstone Header CID is wrong" - assert header["ownerID"] == wallet.get_address_from_json(0), "Tombstone Owner ID is wrong" - assert header["objectType"] == "TOMBSTONE", "Header Type isn't Tombstone" - assert header["sessionToken"]["body"]["object"]["verb"] == "DELETE", "Header Session Type isn't DELETE" - assert header["sessionToken"]["body"]["object"]["target"]["container"] == cid, "Header Session ID is wrong" - assert oid in header["sessionToken"]["body"]["object"]["target"]["objects"], "Header Session OID is wrong" diff --git a/src/frostfs_testlib/storage/__init__.py b/src/frostfs_testlib/storage/__init__.py deleted file mode 100644 index cbbef84..0000000 --- a/src/frostfs_testlib/storage/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -from frostfs_testlib.storage.service_registry import ServiceRegistry - -__class_registry = ServiceRegistry() - - -def get_service_registry() -> ServiceRegistry: - """Returns registry with registered classes related to cluster and cluster nodes. - - ServiceClassRegistry is a singleton instance that can be configured with multiple classes that - represents service on the cluster physical node. - - Returns: - Singleton ServiceClassRegistry instance. 
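A minimal sketch combining object deletion with the tombstone check above; wallet, cid, oid and the cluster fixture are assumed:

    # Hypothetical sketch: delete an object and validate the tombstone header it produced.
    tombstone = delete_object(wallet, cid, oid, shell=shell, endpoint=cluster.default_rpc_endpoint)
    verify_head_tombstone(wallet, cid, oid_ts=tombstone, oid=oid, shell=shell, endpoint=cluster.default_rpc_endpoint)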
- """ - return __class_registry diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py deleted file mode 100644 index b67e34d..0000000 --- a/src/frostfs_testlib/storage/cluster.py +++ /dev/null @@ -1,329 +0,0 @@ -import random -import re - -import yaml -from yarl import URL - -from frostfs_testlib import reporter -from frostfs_testlib.hosting import Host, Hosting -from frostfs_testlib.hosting.config import ServiceConfig -from frostfs_testlib.storage import get_service_registry -from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml -from frostfs_testlib.storage.constants import ConfigAttributes -from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode -from frostfs_testlib.storage.dataclasses.metrics import Metrics -from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass -from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces -from frostfs_testlib.storage.service_registry import ServiceRegistry - - -class ClusterNode: - """ - Represents physical node where multiple different services may be located - """ - - class_registry: ServiceRegistry - id: int - host: Host - metrics: Metrics - - def __init__(self, host: Host, id: int) -> None: - self.host = host - self.id = id - self.class_registry = get_service_registry() - self.metrics = Metrics(host=self.host, metrics_endpoint=self.storage_node.get_metrics_endpoint()) - - @property - def host_ip(self): - return self.host.config.address - - def __eq__(self, other): - return self.host.config.address == other.host.config.address - - def __hash__(self): - return id(self.host.config.address) - - def __str__(self): - return self.host.config.address - - def __repr__(self) -> str: - return self.host.config.address - - # for backward compatibility and to not touch other codebase too much - @property - def storage_node(self) -> StorageNode: - return self.service(StorageNode) - - # for backward compatibility and to not touch other codebase too much - @property - def ir_node(self) -> InnerRing: - return self.service(InnerRing) - - # for backward compatibility and to not touch other codebase too much - @property - def morph_chain(self) -> MorphChain: - return self.service(MorphChain) - - # for backward compatibility and to not touch other codebase too much - @property - def http_gate(self) -> HTTPGate: - return self.service(HTTPGate) - - # for backward compatibility and to not touch other codebase too much - @property - def s3_gate(self) -> S3Gate: - return self.service(S3Gate) - - # TODO: Deprecated. Use config with ServiceConfigurationYml interface - def get_config(self, config_file_path: str) -> dict: - shell = self.host.get_shell() - - result = shell.exec(f"cat {config_file_path}") - config_text = result.stdout - - config = yaml.safe_load(config_text) - return config - - # TODO: Deprecated. Use config with ServiceConfigurationYml interface - def save_config(self, new_config: dict, config_file_path: str) -> None: - shell = self.host.get_shell() - - config_str = yaml.dump(new_config) - shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}") - - def config(self, service_type: type[ServiceClass]) -> ServiceConfigurationYml: - return self.service(service_type).config - - def service(self, service_type: type[ServiceClass]) -> ServiceClass: - """ - Get a service cluster node of specified type. 
- - Args: - service_type: type of the service which should be returned, - for frostfs it can be StorageNode, S3Gate, HttpGate, MorphChain and InnerRing. - - Returns: - service of service_type class. - """ - - service_entry = self.class_registry.get_entry(service_type) - service_name = service_entry["hosting_service_name"] - - pattern = f"{service_name}_{self.id:02}" - config = self.host.get_service_config(pattern) - - return service_type( - self.id, - config.name, - self.host, - ) - - @property - def services(self) -> list[NodeBase]: - svcs: list[NodeBase] = [] - svcs_names_on_node = [svc.name for svc in self.host.config.services] - for entry in self.class_registry._class_mapping.values(): - hosting_svc_name = entry["hosting_service_name"] - pattern = f"{hosting_svc_name}_{self.id:02}" - if pattern in svcs_names_on_node: - config = self.host.get_service_config(pattern) - svcs.append( - entry["cls"]( - self.id, - config.name, - self.host, - ) - ) - - return svcs - - def get_all_interfaces(self) -> dict[str, str]: - return self.host.config.interfaces - - def get_interface(self, interface: Interfaces) -> str: - return self.host.config.interfaces[interface.value] - - def get_data_interfaces(self) -> list[str]: - return [ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "data" in name_interface] - - def get_data_interface(self, search_interface: str) -> list[str]: - return [self.host.config.interfaces[interface] for interface in self.host.config.interfaces.keys() if search_interface == interface] - - def get_internal_interfaces(self) -> list[str]: - return [ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "internal" in name_interface] - - def get_internal_interface(self, search_internal: str) -> list[str]: - return [self.host.config.interfaces[interface] for interface in self.host.config.interfaces.keys() if search_internal == interface] - - -class Cluster: - """ - This class represents a Cluster object for the whole storage based on provided hosting - """ - - default_rpc_endpoint: str - default_s3_gate_endpoint: str - default_http_gate_endpoint: str - - def __init__(self, hosting: Hosting) -> None: - self._hosting = hosting - - self.class_registry = get_service_registry() - self.default_rpc_endpoint = self.services(StorageNode)[0].get_rpc_endpoint() - self.default_s3_gate_endpoint = self.services(S3Gate)[0].get_endpoint() - self.default_http_gate_endpoint = self.services(HTTPGate)[0].get_endpoint() - - @property - def hosts(self) -> list[Host]: - """ - Returns list of Hosts - """ - return self._hosting.hosts - - # for backward compatibility and to not touch other codebase too much - @property - def storage_nodes(self) -> list[StorageNode]: - return self.services(StorageNode) - - # for backward compatibility and to not touch other codebase too much - @property - def ir_nodes(self) -> list[InnerRing]: - return self.services(InnerRing) - - # for backward compatibility and to not touch other codebase too much - @property - def s3_gates(self) -> list[S3Gate]: - return self.services(S3Gate) - - @property - def http_gates(self) -> list[HTTPGate]: - return self.services(HTTPGate) - - @property - def morph_chain(self) -> list[MorphChain]: - return self.services(MorphChain) - - def nodes(self, services: list[ServiceClass]) -> list[ClusterNode]: - """ - Resolve which cluster nodes hosting the specified services. - - Args: - services: list of services to resolve hosting cluster nodes. 
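Service resolution therefore works from both directions: a Cluster enumerates all services of a given type across hosts, while a ClusterNode resolves the instance running on it via the <service_name>_<two-digit id> naming pattern. A minimal sketch, assuming a configured Hosting instance from the test framework:

    # Illustration only: hosting comes from the framework's Hosting setup.
    cluster = Cluster(hosting)
    storage_services = cluster.services(StorageNode)    # every storage service in the cluster
    first_node = cluster.cluster_nodes[0]               # wrapper around the physical host
    storage_on_node = first_node.service(StorageNode)   # resolved as e.g. "<name>_01"
    print(cluster.default_rpc_endpoint, storage_on_node.get_rpc_endpoint())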
- - Returns: - list of cluster nodes which host specified services. - """ - - cluster_nodes = set() - for service in services: - cluster_nodes.update([node for node in self.cluster_nodes if node.service(type(service)) == service]) - - return list(cluster_nodes) - - def node(self, service: ServiceClass) -> ClusterNode: - """ - Resolve single cluster node hosting the specified service. - - Args: - services: list of services to resolve hosting cluster nodes. - - Returns: - list of cluster nodes which host specified services. - """ - - nodes = [node for node in self.cluster_nodes if node.service(type(service)) == service] - if not len(nodes): - raise RuntimeError(f"Cannot find service {service} on any node") - - return nodes[0] - - def services(self, service_type: type[ServiceClass]) -> list[ServiceClass]: - """ - Get all services in a cluster of specified type. - - Args: - service_type: type of the services which should be returned, - for frostfs it can be StorageNode, S3Gate, HttpGate, MorphChain and InnerRing. - - Returns: - list of services of service_type class. - """ - - service = self.class_registry.get_entry(service_type) - service_name = service["hosting_service_name"] - cls: type[NodeBase] = service["cls"] - - pattern = f"{service_name}_\d*$" - configs = self.hosting.find_service_configs(pattern) - - found_nodes = [] - for config in configs: - # config.name is something like s3-gate01. Cut last digits to know service type - service_type = re.findall("(.*)_\d+", config.name)[0] - # exclude unsupported services - if service_type != service_name: - continue - - found_nodes.append( - cls( - self._get_id(config.name), - config.name, - self.hosting.get_host_by_service(config.name), - ) - ) - return found_nodes - - @property - def cluster_nodes(self) -> list[ClusterNode]: - """ - Returns list of Cluster Nodes - """ - - return [ClusterNode(host, id) for id, host in enumerate(self.hosts, start=1)] - - @property - def hosting(self) -> Hosting: - return self._hosting - - def _create_wallet_config(self, service: ServiceConfig) -> None: - wallet_path = service.attributes[ConfigAttributes.LOCAL_WALLET_CONFIG] - wallet_password = service.attributes[ConfigAttributes.WALLET_PASSWORD] - with open(wallet_path, "w") as file: - yaml.dump({"password": wallet_password}, file) - - def create_wallet_configs(self, hosting: Hosting) -> None: - configs = hosting.find_service_configs(".*") - for config in configs: - if ConfigAttributes.LOCAL_WALLET_CONFIG in config.attributes: - self._create_wallet_config(config) - - def is_local_devenv(self) -> bool: - if len(self.hosting.hosts) == 1: - host = self.hosting.hosts[0] - if host.config.address == "localhost" and host.config.plugin_name == "docker": - return True - return False - - def _get_id(self, node_name) -> int: - pattern = "\d*$" - - matches = re.search(pattern, node_name) - if not matches: - raise RuntimeError(f"Can't parse Id of the node {node_name}") - return int(matches.group()) - - def get_random_storage_rpc_endpoint(self) -> str: - return random.choice(self.get_storage_rpc_endpoints()) - - def get_storage_rpc_endpoints(self) -> list[str]: - nodes: list[StorageNode] = self.services(StorageNode) - return [node.get_rpc_endpoint() for node in nodes] - - def get_morph_endpoints(self) -> list[str]: - nodes: list[MorphChain] = self.services(MorphChain) - return [node.get_endpoint() for node in nodes] - - def get_nodes_by_ip(self, ips: list[str]) -> list[ClusterNode]: - cluster_nodes = [node for node in self.cluster_nodes if 
URL(node.morph_chain.get_endpoint()).host in ips] - with reporter.step(f"Return cluster nodes - {cluster_nodes}"): - return cluster_nodes diff --git a/src/frostfs_testlib/storage/configuration/interfaces.py b/src/frostfs_testlib/storage/configuration/interfaces.py deleted file mode 100644 index b2bc683..0000000 --- a/src/frostfs_testlib/storage/configuration/interfaces.py +++ /dev/null @@ -1,65 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Any - - -class ServiceConfigurationYml(ABC): - """ - Class to manipulate yml configuration for service - """ - - def _find_option(self, key: str, data: dict): - tree = key.split(":") - current = data - for node in tree: - if isinstance(current, list) and len(current) - 1 >= int(node): - current = current[int(node)] - continue - - if node not in current: - return None - - current = current[node] - - return current - - def _set_option(self, key: str, value: Any, data: dict): - tree = key.split(":") - current = data - for node in tree[:-1]: - if isinstance(current, list) and len(current) - 1 >= int(node): - current = current[int(node)] - continue - - if node not in current: - current[node] = {} - - current = current[node] - - current[tree[-1]] = value - - @abstractmethod - def get(self, key: str) -> str: - """ - Get parameter value from current configuration - - Args: - key: key of the parameter in yaml format like 'storage:shard:default:resync_metabase' - - Returns: - value of the parameter - """ - - @abstractmethod - def set(self, values: dict[str, Any]): - """ - Sets parameters to configuration - - Args: - values: dict where key is the key of the parameter in yaml format like 'storage:shard:default:resync_metabase' and value is the value of the option to set - """ - - @abstractmethod - def revert(self): - """ - Revert changes - """ diff --git a/src/frostfs_testlib/storage/configuration/service_configuration.py b/src/frostfs_testlib/storage/configuration/service_configuration.py deleted file mode 100644 index fddd64a..0000000 --- a/src/frostfs_testlib/storage/configuration/service_configuration.py +++ /dev/null @@ -1,88 +0,0 @@ -import os -import re -from typing import Any - -import yaml - -from frostfs_testlib import reporter -from frostfs_testlib.shell.interfaces import CommandOptions, Shell -from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml - - -def extend_dict(extend_me: dict, extend_by: dict): - if isinstance(extend_by, dict): - for k, v in extend_by.items(): - if k in extend_me: - extend_dict(extend_me.get(k), v) - else: - extend_me[k] = v - else: - extend_me += extend_by - - -class ServiceConfiguration(ServiceConfigurationYml): - def __init__(self, service_name: str, shell: Shell, config_dir: str, main_config_path: str) -> None: - self.service_name = service_name - self.shell = shell - self.main_config_path = main_config_path - self.confd_path = os.path.join(config_dir, "conf.d") - self.custom_file = os.path.join(self.confd_path, "99_changes.yml") - - def _path_exists(self, path: str) -> bool: - return not self.shell.exec(f"test -e {path}", options=CommandOptions(check=False)).return_code - - def _get_config_files(self): - config_files = [self.main_config_path] - - if self._path_exists(self.confd_path): - files = self.shell.exec(f"find {self.confd_path} -type f").stdout.strip().split() - # Sorting files in backwards order from latest to first one - config_files.extend(sorted(files, key=lambda x: -int(re.findall("^\d+", os.path.basename(x))[0]))) - - return config_files - - def 
_get_configuration(self, config_files: list[str]) -> dict: - if not config_files: - return [{}] - - splitter = "+++++" - files_str = " ".join(config_files) - all_content = self.shell.exec( - f"echo Getting config files; for file in {files_str}; do (echo {splitter}; sudo cat ${{file}}); done" - ).stdout - files_content = all_content.split("+++++")[1:] - files_data = [yaml.safe_load(file_content) for file_content in files_content] - - mergedData = {} - for data in files_data: - extend_dict(mergedData, data) - - return mergedData - - def get(self, key: str) -> str | Any: - with reporter.step(f"Get {key} configuration value for {self.service_name}"): - config_files = self._get_config_files() - configuration = self._get_configuration(config_files) - result = self._find_option(key, configuration) - return result - - def set(self, values: dict[str, Any]): - with reporter.step(f"Change configuration for {self.service_name}"): - if not self._path_exists(self.confd_path): - self.shell.exec(f"mkdir {self.confd_path}") - - if self._path_exists(self.custom_file): - data = self._get_configuration([self.custom_file]) - else: - data = {} - - for key, value in values.items(): - self._set_option(key, value, data) - - content = yaml.dump(data) - self.shell.exec(f"echo '{content}' | sudo tee {self.custom_file}") - self.shell.exec(f"chmod 777 {self.custom_file}") - - def revert(self): - with reporter.step(f"Revert changed options for {self.service_name}"): - self.shell.exec(f"rm -rf {self.custom_file}") diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py deleted file mode 100644 index 2e49208..0000000 --- a/src/frostfs_testlib/storage/constants.py +++ /dev/null @@ -1,30 +0,0 @@ -class ConfigAttributes: - SERVICE_NAME = "systemd_service_name" - WALLET_PASSWORD = "wallet_password" - WALLET_PATH = "wallet_path" - WALLET_CONFIG = "wallet_config" - CONFIG_DIR = "service_config_dir" - CONFIG_PATH = "config_path" - WORKING_DIR = "working_dir" - SHARD_CONFIG_PATH = "shard_config_path" - LOGGER_CONFIG_PATH = "logger_config_path" - LOCAL_WALLET_PATH = "local_wallet_path" - LOCAL_WALLET_CONFIG = "local_wallet_config_path" - REMOTE_WALLET_CONFIG = "remote_wallet_config_path" - ENDPOINT_DATA_0 = "endpoint_data0" - ENDPOINT_DATA_1 = "endpoint_data1" - ENDPOINT_DATA_0_NS = "endpoint_data0_namespace" - ENDPOINT_INTERNAL = "endpoint_internal0" - ENDPOINT_PROMETHEUS = "endpoint_prometheus" - ENDPOINT_PPROF = "endpoint_pprof" - CONTROL_ENDPOINT = "control_endpoint" - UN_LOCODE = "un_locode" - - -class PlacementRule: - DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" - SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X" - REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" - REP_1_FOR_2_NODES_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 2 FROM * AS X" - DEFAULT_EC_PLACEMENT_RULE = "EC 3.1" - EC_1_1_FOR_2_NODES_PLACEMENT_RULE = "EC 1.1 IN X CBF 1 SELECT 2 FROM * AS X" diff --git a/src/frostfs_testlib/storage/controllers/__init__.py b/src/frostfs_testlib/storage/controllers/__init__.py deleted file mode 100644 index 65268f4..0000000 --- a/src/frostfs_testlib/storage/controllers/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController -from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController -from frostfs_testlib.storage.controllers.disk_controller import DiskController, DiskInfo -from 
frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py deleted file mode 100644 index 5628282..0000000 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ /dev/null @@ -1,225 +0,0 @@ -import copy -from datetime import datetime - -import frostfs_testlib.resources.optionals as optionals -from frostfs_testlib import reporter -from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner -from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType -from frostfs_testlib.load.load_report import LoadReport -from frostfs_testlib.load.load_verifiers import LoadVerifier -from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode -from frostfs_testlib.testing.parallel import parallel -from frostfs_testlib.testing.test_control import run_optionally - - -class BackgroundLoadController: - k6_dir: str - load_params: LoadParams - original_load_params: LoadParams - verification_params: LoadParams - cluster_nodes: list[ClusterNode] - nodes_under_load: list[ClusterNode] - load_counter: int - load_summaries: dict - endpoints: list[str] - runner: ScenarioRunner - started: bool - load_reporters: list[LoadReport] - - def __init__( - self, - k6_dir: str, - load_params: LoadParams, - cluster_nodes: list[ClusterNode], - nodes_under_load: list[ClusterNode], - runner: ScenarioRunner, - ) -> None: - self.k6_dir = k6_dir - self.original_load_params = load_params - self.load_params = copy.deepcopy(self.original_load_params) - self.cluster_nodes = cluster_nodes - self.nodes_under_load = nodes_under_load - self.load_counter = 1 - self.runner = runner - self.started = False - self.load_reporters = [] - if load_params.endpoint_selection_strategy is None: - raise RuntimeError("endpoint_selection_strategy should not be None") - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, []) - def _get_endpoints(self, load_type: LoadType, endpoint_selection_strategy: EndpointSelectionStrategy): - all_endpoints = { - LoadType.gRPC: { - EndpointSelectionStrategy.ALL: list( - set( - endpoint - for node_under_load in self.nodes_under_load - for endpoint in node_under_load.service(StorageNode).get_all_rpc_endpoint() - ) - ), - EndpointSelectionStrategy.FIRST: list( - set(node_under_load.service(StorageNode).get_rpc_endpoint() for node_under_load in self.nodes_under_load) - ), - }, - # for some reason xk6 appends http protocol on its own - LoadType.S3: { - EndpointSelectionStrategy.ALL: list( - set( - endpoint - for node_under_load in self.nodes_under_load - for endpoint in node_under_load.service(S3Gate).get_all_endpoints() - ) - ), - EndpointSelectionStrategy.FIRST: list( - set(node_under_load.service(S3Gate).get_endpoint() for node_under_load in self.nodes_under_load) - ), - }, - } - - return all_endpoints[load_type][endpoint_selection_strategy] - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Init k6 instances") - def init_k6(self): - self.endpoints = self._get_endpoints(self.load_params.load_type, self.load_params.endpoint_selection_strategy) - self.runner.init_k6_instances(self.load_params, self.endpoints, self.k6_dir) - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Prepare load instances") - def prepare(self): - 
self.runner.prepare(self.load_params, self.cluster_nodes, self.nodes_under_load, self.k6_dir) - self.init_k6() - - def append_reporter(self, load_report: LoadReport): - self.load_reporters.append(load_report) - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - def start(self): - with reporter.step(f"Start load on nodes {self.nodes_under_load}"): - self.runner.start() - self.started = True - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Stop load") - def stop(self): - self.runner.stop() - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, True) - def is_running(self) -> bool: - return self.runner.is_running - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Reset load") - def _reset_for_consequent_load(self): - """This method is required if we want to run multiple loads during test run. - Raise load counter by 1 and append it to load_id - """ - self.load_counter += 1 - self.load_params = copy.deepcopy(self.original_load_params) - self.load_params.set_id(f"{self.load_params.load_id}_{self.load_counter}") - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Startup load") - def startup(self): - self.prepare() - self.preset() - self.start() - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - def preset(self): - self.runner.preset() - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Stop and get results of load") - def teardown(self): - if not self.started: - return - - self.stop() - self.load_summaries = self._get_results() - self.started = False - - start_time = min(self._get_start_times()) - end_time = max(self._get_end_times()) - - for load_report in self.load_reporters: - load_report.set_start_time(start_time) - load_report.set_end_time(end_time) - load_report.add_summaries(self.load_summaries) - - def _get_start_times(self) -> list[datetime]: - futures = parallel([k6.get_start_time for k6 in self.runner.get_k6_instances()]) - return [future.result() for future in futures] - - def _get_end_times(self) -> list[datetime]: - futures = parallel([k6.get_end_time for k6 in self.runner.get_k6_instances()]) - return [future.result() for future in futures] - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Run post-load verification") - def verify(self): - try: - load_issues = self._collect_load_issues() - if self.load_params.verify: - load_issues.extend(self._run_verify_scenario()) - - assert not load_issues, "\n".join(load_issues) - finally: - self._reset_for_consequent_load() - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Collect load issues") - def _collect_load_issues(self): - verifier = LoadVerifier(self.load_params) - return verifier.collect_load_issues(self.load_summaries) - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - def wait_until_finish(self, soft_timeout: int = 0): - self.runner.wait_until_finish(soft_timeout) - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Verify loaded objects") - def _run_verify_scenario(self) -> list[str]: - self.verification_params = LoadParams( - verify_clients=self.load_params.verify_clients, - scenario=LoadScenario.VERIFY, - read_from=self.load_params.read_from, - registry_file=self.load_params.registry_file, - verify_time=self.load_params.verify_time, - custom_registry=self.load_params.custom_registry, - load_type=self.load_params.load_type, - load_id=self.load_params.load_id, - 
vu_init_time=0, - working_dir=self.load_params.working_dir, - endpoint_selection_strategy=self.load_params.endpoint_selection_strategy, - k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy, - setup_timeout=self.load_params.setup_timeout, - ) - - if self.verification_params.custom_registry: - self.verification_params.registry_file = self.load_params.custom_registry - - if self.verification_params.verify_time is None: - raise RuntimeError("verify_time should not be none") - - self.runner.init_k6_instances(self.verification_params, self.endpoints, self.k6_dir) - with reporter.step("Run verify scenario"): - self.runner.start() - self.runner.wait_until_finish() - - with reporter.step("Collect verify issues"): - verification_summaries = self._get_results() - verifier = LoadVerifier(self.load_params) - return verifier.collect_verify_issues(self.load_summaries, verification_summaries) - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - def _get_results(self) -> dict: - with reporter.step(f"Get {self.load_params.scenario.value} scenario results"): - return self.runner.get_results() - - def __str__(self) -> str: - return self.load_params.__str__() - - def __repr__(self) -> str: - return repr(self.load_params) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py deleted file mode 100644 index 51aaefb..0000000 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ /dev/null @@ -1,543 +0,0 @@ -import itertools -import logging -import time -from datetime import datetime, timezone -from typing import TypeVar - -import frostfs_testlib.resources.optionals as optionals -from frostfs_testlib import reporter -from frostfs_testlib.cli import FrostfsAdm, FrostfsCli -from frostfs_testlib.cli.netmap_parser import NetmapParser -from frostfs_testlib.healthcheck.interfaces import Healthcheck -from frostfs_testlib.hosting.interfaces import HostStatus -from frostfs_testlib.plugins import load_all -from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC -from frostfs_testlib.resources.common import MORPH_BLOCK_TIME -from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider -from frostfs_testlib.steps.network import IpHelper -from frostfs_testlib.steps.node_management import include_node_to_network_map, remove_nodes_from_map_morph -from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode -from frostfs_testlib.storage.controllers.disk_controller import DiskController -from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass -from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeStatus -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo -from frostfs_testlib.testing import parallel -from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success -from frostfs_testlib.utils.datetime_utils import parse_time - -logger = logging.getLogger("NeoLogger") - - -class StateManager: - def __init__(self, cluster_state_controller: "ClusterStateController") -> None: - self.csc = cluster_state_controller - - -StateManagerClass = TypeVar("StateManagerClass", bound=StateManager) - - -class ClusterStateController: - def __init__(self, shell: Shell, cluster: Cluster, healthcheck: Healthcheck) -> None: - self.stopped_nodes: list[ClusterNode] = [] - self.detached_disks: dict[str, DiskController] 
= {} - self.dropped_traffic: set[ClusterNode] = set() - self.excluded_from_netmap: list[StorageNode] = [] - self.stopped_services: set[NodeBase] = set() - self.cluster = cluster - self.healthcheck = healthcheck - self.shell = shell - self.suspended_services: dict[str, list[ClusterNode]] = {} - self.nodes_with_modified_interface: list[ClusterNode] = [] - self.managers: list[StateManagerClass] = [] - - # TODO: move all functionality to managers - managers = set(load_all(group="frostfs.testlib.csc_managers")) - for manager in managers: - self.managers.append(manager(self)) - - def manager(self, manager_type: type[StateManagerClass]) -> StateManagerClass: - for manager in self.managers: - # Subclasses here for the future if we have overriding subclasses of base interface - if issubclass(type(manager), manager_type): - return manager - - def _get_stopped_by_node(self, node: ClusterNode) -> set[NodeBase]: - stopped_by_node = [svc for svc in self.stopped_services if svc.host == node.host] - return set(stopped_by_node) - - def _get_stopped_by_type(self, service_type: type[ServiceClass]) -> set[ServiceClass]: - stopped_by_type = [svc for svc in self.stopped_services if isinstance(svc, service_type)] - return set(stopped_by_type) - - def _from_stopped_nodes(self, service_type: type[ServiceClass]) -> set[ServiceClass]: - stopped_on_nodes = set([node.service(service_type) for node in self.stopped_nodes]) - return set(stopped_on_nodes) - - def _get_online(self, service_type: type[ServiceClass]) -> set[ServiceClass]: - stopped_svc = self._get_stopped_by_type(service_type).union(self._from_stopped_nodes(service_type)) - online_svc = set(self.cluster.services(service_type)) - stopped_svc - return online_svc - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Stop host of node {node}") - def stop_node_host(self, node: ClusterNode, mode: str): - # Drop ssh connection for this node before shutdown - provider = SshConnectionProvider() - provider.drop(node.host_ip) - - self.stopped_nodes.append(node) - with reporter.step(f"Stop host {node.host.config.address}"): - node.host.stop_host(mode=mode) - self._wait_for_host_offline(node) - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Shutdown whole cluster") - def shutdown_cluster(self, mode: str, reversed_order: bool = False): - nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes - - # Drop all ssh connections before shutdown - provider = SshConnectionProvider() - provider.drop_all() - - for node in nodes: - with reporter.step(f"Stop host {node.host.config.address}"): - self.stopped_nodes.append(node) - node.host.stop_host(mode=mode) - - for node in nodes: - self._wait_for_host_offline(node) - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start host of node {node}") - def start_node_host(self, node: ClusterNode, startup_healthcheck: bool = True): - with reporter.step(f"Start host {node.host.config.address}"): - node.host.start_host() - self._wait_for_host_online(node) - self.stopped_nodes.remove(node) - if startup_healthcheck: - self.wait_startup_healthcheck() - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start stopped hosts") - def start_stopped_hosts(self, reversed_order: bool = False): - if not self.stopped_nodes: - return - - nodes = reversed(self.stopped_nodes) if reversed_order else self.stopped_nodes - for node in nodes: - with reporter.step(f"Start host {node.host.config.address}"): - node.host.start_host() - 
self.stopped_services.difference_update(self._get_stopped_by_node(node)) - - self.stopped_nodes = [] - with reporter.step("Wait for all nodes to go online"): - parallel(self._wait_for_host_online, self.cluster.cluster_nodes) - - self.wait_after_storage_startup() - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Detach disk {device} at {mountpoint} on node {node}") - def detach_disk(self, node: StorageNode, device: str, mountpoint: str): - disk_controller = self._get_disk_controller(node, device, mountpoint) - self.detached_disks[disk_controller.id] = disk_controller - disk_controller.detach() - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Attach disk {device} at {mountpoint} on node {node}") - def attach_disk(self, node: StorageNode, device: str, mountpoint: str): - disk_controller = self._get_disk_controller(node, device, mountpoint) - disk_controller.attach() - self.detached_disks.pop(disk_controller.id, None) - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Restore detached disks") - def restore_disks(self): - for disk_controller in self.detached_disks.values(): - disk_controller.attach() - self.detached_disks = {} - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Stop all {service_type} services") - def stop_services_of_type(self, service_type: type[ServiceClass], mask: bool = True): - services = self.cluster.services(service_type) - self.stopped_services.update(services) - parallel([service.stop_service for service in services], mask=mask) - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start all {service_type} services") - def start_services_of_type(self, service_type: type[ServiceClass]): - services = self.cluster.services(service_type) - parallel([service.start_service for service in services]) - self.stopped_services.difference_update(set(services)) - - if service_type == StorageNode: - self.wait_after_storage_startup() - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Send sighup to all {service_type} services") - def sighup_services_of_type(self, service_type: type[ServiceClass]): - services = self.cluster.services(service_type) - parallel([service.send_signal_to_service for service in services], signal="SIGHUP") - - if service_type == StorageNode: - self.wait_after_storage_startup() - - @wait_for_success(600, 60) - def wait_s3gate(self, s3gate: S3Gate): - with reporter.step(f"Wait for {s3gate} reconnection"): - result = s3gate.get_metric("frostfs_s3_gw_pool_current_nodes") - assert 'address="127.0.0.1' in result.stdout, "S3Gate should connect to local storage node" - - @reporter.step("Wait for S3Gates reconnection to local storage") - def wait_s3gates(self): - online_s3gates = self._get_online(S3Gate) - if online_s3gates: - parallel(self.wait_s3gate, online_s3gates) - - @reporter.step("Wait for cluster startup healtcheck") - def wait_startup_healthcheck(self): - nodes = self.cluster.nodes(self._get_online(StorageNode)) - parallel(self.healthcheck.startup_healthcheck, nodes) - - @reporter.step("Wait for storage reconnection to the system") - def wait_after_storage_startup(self): - self.wait_startup_healthcheck() - self.wait_s3gates() - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start all stopped services") - def start_all_stopped_services(self): - stopped_storages = self._get_stopped_by_type(StorageNode) - parallel([service.start_service for service in self.stopped_services]) - 
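Because the controller records every host, service and traffic rule it has touched, a test only needs to undo state in bulk at teardown. A minimal failover sketch, assuming csc is a ClusterStateController fixture and node is the ClusterNode under test (both fixture names are assumptions of this example):

    # Illustration only: csc and node come from test fixtures.
    csc.stop_service_of_type(node, StorageNode)
    # ... exercise the cluster while that storage service is down ...
    csc.start_all_stopped_services()  # restarts recorded services, then waits for storage healthcheck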
self.stopped_services.clear() - - if stopped_storages: - self.wait_after_storage_startup() - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Stop {service_type} service on {node}") - def stop_service_of_type(self, node: ClusterNode, service_type: ServiceClass, mask: bool = True): - service = node.service(service_type) - service.stop_service(mask) - self.stopped_services.add(service) - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Send sighup to {service_type} service on {node}") - def sighup_service_of_type(self, node: ClusterNode, service_type: ServiceClass): - service = node.service(service_type) - service.send_signal_to_service("SIGHUP") - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start {service_type} service on {node}") - def start_service_of_type(self, node: ClusterNode, service_type: ServiceClass): - service = node.service(service_type) - service.start_service() - self.stopped_services.discard(service) - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start all stopped {service_type} services") - def start_stopped_services_of_type(self, service_type: ServiceClass): - stopped_svc = self._get_stopped_by_type(service_type) - if not stopped_svc: - return - - parallel([svc.start_service for svc in stopped_svc]) - self.stopped_services.difference_update(stopped_svc) - - if service_type == StorageNode: - self.wait_after_storage_startup() - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Restart {service_type} service on {node}") - def restart_service_of_type(self, node: ClusterNode, service_type: ServiceClass): - service = node.service(service_type) - service.restart_service() - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Restart all {service_type} services") - def restart_services_of_type(self, service_type: type[ServiceClass]): - services = self.cluster.services(service_type) - parallel([service.restart_service for service in services]) - - if service_type == StorageNode: - self.wait_after_storage_startup() - - # TODO: Deprecated - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Stop storage service on {node}") - def stop_storage_service(self, node: ClusterNode, mask: bool = True): - self.stop_service_of_type(node, StorageNode, mask) - - # TODO: Deprecated - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start storage service on {node}") - def start_storage_service(self, node: ClusterNode): - self.start_service_of_type(node, StorageNode) - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Suspend {process_name} service in {node}") - def suspend_service(self, process_name: str, node: ClusterNode): - node.host.wait_success_suspend_process(process_name) - if self.suspended_services.get(process_name): - self.suspended_services[process_name].append(node) - else: - self.suspended_services[process_name] = [node] - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Resume {process_name} service in {node}") - def resume_service(self, process_name: str, node: ClusterNode): - node.host.wait_success_resume_process(process_name) - if self.suspended_services.get(process_name) and node in self.suspended_services[process_name]: - self.suspended_services[process_name].remove(node) - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start suspend processes services") - def resume_suspended_services(self): - for process_name, 
list_nodes in self.suspended_services.items(): - [node.host.wait_success_resume_process(process_name) for node in list_nodes] - self.suspended_services = {} - - @reporter.step("Drop traffic to {node}, nodes - {block_nodes}") - def drop_traffic(self, node: ClusterNode, wakeup_timeout: int, name_interface: str, block_nodes: list[ClusterNode] = None) -> None: - interfaces_tables = self._parse_interfaces(block_nodes, name_interface) - IpHelper.drop_input_traffic_to_node(node, interfaces_tables) - time.sleep(wakeup_timeout) - self.dropped_traffic.add(node) - - @reporter.step("Start traffic to {node}") - def restore_traffic(self, node: ClusterNode) -> None: - IpHelper.restore_input_traffic_to_node(node=node) - self.dropped_traffic.discard(node) - - @reporter.step("Restore blocked nodes") - def restore_all_traffic(self): - if not self.dropped_traffic: - return - parallel(self._restore_traffic_to_node, self.dropped_traffic) - self.dropped_traffic.clear() - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Hard reboot host {node} via magic SysRq option") - def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True, startup_healthcheck: bool = True): - shell = node.host.get_shell() - shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"') - - options = CommandOptions(close_stdin=True, timeout=1, check=False) - shell.exec('sudo sh -c "echo b > /proc/sysrq-trigger"', options) - - # Drop ssh connection for this node - provider = SshConnectionProvider() - provider.drop(node.host_ip) - - if wait_for_return: - # Let the things to be settled - # A little wait here to prevent ssh stuck during panic - time.sleep(10) - self._wait_for_host_online(node) - if startup_healthcheck: - self.wait_startup_healthcheck() - - @reporter.step("Down {interface} to {nodes}") - def down_interface(self, nodes: list[ClusterNode], interface: str): - for node in nodes: - node.host.down_interface(interface=interface) - assert node.host.check_state(interface=interface) == "DOWN" - self.nodes_with_modified_interface.append(node) - - @reporter.step("Up {interface} to {nodes}") - def up_interface(self, nodes: list[ClusterNode], interface: str): - for node in nodes: - node.host.up_interface(interface=interface) - assert node.host.check_state(interface=interface) == "UP" - if node in self.nodes_with_modified_interface: - self.nodes_with_modified_interface.remove(node) - - @reporter.step("Restore interface") - def restore_interfaces(self): - for node in self.nodes_with_modified_interface: - dict_interfaces = node.host.config.interfaces.keys() - for name_interface in dict_interfaces: - if "mgmt" not in name_interface: - node.host.up_interface(interface=name_interface) - - @reporter.step("Get node time") - def get_node_date(self, node: ClusterNode) -> datetime: - shell = node.host.get_shell() - return datetime.strptime(shell.exec('date +"%Y-%m-%d %H:%M:%S"').stdout.strip(), "%Y-%m-%d %H:%M:%S") - - @reporter.step("Set time on nodes in {in_date}") - def change_date_on_all_nodes(self, cluster: Cluster, in_date: datetime) -> None: - parallel(self.change_node_date, cluster.cluster_nodes, in_date=in_date) - - @reporter.step("Set time on {node} to {in_date}") - def change_node_date(self, node: ClusterNode, in_date: datetime) -> None: - shell = node.host.get_shell() - in_date_frmt = in_date.strftime("%Y-%m-%d %H:%M:%S") - shell.exec(f"timedatectl set-time '{in_date_frmt}'") - node_time = self.get_node_date(node) - - with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a 
minute"): - assert (node_time - in_date).total_seconds() < 60 - - @reporter.step("Restore time on nodes") - def restore_date_on_all_nodes(self, cluster: Cluster) -> None: - parallel(self.restore_node_date, cluster.cluster_nodes) - - @reporter.step("Restore time on {node}") - def restore_node_date(self, node: ClusterNode) -> None: - shell = node.host.get_shell() - now_time = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S") - - with reporter.step(f"Set {now_time} time"): - shell.exec(f"timedatectl set-time '{now_time}'") - - @reporter.step("Set MaintenanceModeAllowed - {status}") - def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None: - frostfs_adm = FrostfsAdm( - shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH - ) - frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}") - - @reporter.step("Set node status to {status} in CSC") - def set_node_status(self, cluster_node: ClusterNode, wallet: WalletInfo, status: NodeStatus, await_tick: bool = True) -> None: - rpc_endpoint = cluster_node.storage_node.get_rpc_endpoint() - control_endpoint = cluster_node.service(StorageNode).get_control_endpoint() - - frostfs_adm, frostfs_cli, frostfs_cli_remote = self._get_cli(self.shell, wallet, cluster_node) - node_netinfo = NetmapParser.netinfo(frostfs_cli.netmap.netinfo(rpc_endpoint).stdout) - - if node_netinfo.maintenance_mode_allowed == "false": - with reporter.step("Enable maintenance mode"): - frostfs_adm.morph.set_config("MaintenanceModeAllowed=true") - - with reporter.step(f"Set node status to {status} using FrostfsCli"): - frostfs_cli_remote.control.set_status(control_endpoint, status.value) - - if not await_tick: - return - - with reporter.step("Tick 2 epoch with 2 block await."): - for _ in range(2): - frostfs_adm.morph.force_new_epoch() - time.sleep(parse_time(MORPH_BLOCK_TIME) * 2) - - self.await_node_status(status, wallet, cluster_node) - - @wait_for_success(80, 8, title="Wait for node status become {status}") - def await_node_status(self, status: NodeStatus, wallet: WalletInfo, cluster_node: ClusterNode, checker_node: ClusterNode = None): - frostfs_cli = FrostfsCli(self.shell, FROSTFS_CLI_EXEC, wallet.config_path) - if not checker_node: - checker_node = cluster_node - netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(checker_node.storage_node.get_rpc_endpoint()).stdout) - netmap = [node for node in netmap if cluster_node.get_interface(Interfaces.MGMT) == node.node] - if status == NodeStatus.OFFLINE: - assert ( - cluster_node.get_interface(Interfaces.MGMT) not in netmap - ), f"{cluster_node.get_interface(Interfaces.MGMT)} not in Offline" - else: - assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'" - - def remove_node_from_netmap(self, removes_nodes: list[StorageNode]) -> None: - alive_storage = list(set(self.cluster.storage_nodes) - set(removes_nodes))[0] - remove_nodes_from_map_morph(self.shell, self.cluster, removes_nodes, alive_storage) - self.excluded_from_netmap.extend(removes_nodes) - - def include_node_to_netmap(self, include_node: StorageNode, alive_node: StorageNode): - include_node_to_network_map(include_node, alive_node, self.shell, self.cluster) - self.excluded_from_netmap.pop(self.excluded_from_netmap.index(include_node)) - - def include_all_excluded_nodes(self): - if not self.excluded_from_netmap: - return - alive_node = list(set(self.cluster.storage_nodes) - 
set(self.excluded_from_netmap))[0] - if not alive_node: - return - - for exclude_node in self.excluded_from_netmap.copy(): - self.include_node_to_netmap(exclude_node, alive_node) - - def _get_cli( - self, local_shell: Shell, local_wallet: WalletInfo, cluster_node: ClusterNode - ) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]: - # TODO Move to service config - host = cluster_node.host - service_config = host.get_service_config(cluster_node.storage_node.name) - wallet_path = service_config.attributes["wallet_path"] - wallet_password = service_config.attributes["wallet_password"] - - shell = host.get_shell() - wallet_config_path = f"/tmp/{cluster_node.storage_node.name}-config.yaml" - wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' - shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") - - frostfs_adm = FrostfsAdm(shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH) - frostfs_cli = FrostfsCli(local_shell, FROSTFS_CLI_EXEC, local_wallet.config_path) - frostfs_cli_remote = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path) - return frostfs_adm, frostfs_cli, frostfs_cli_remote - - def _get_disk_controller(self, node: StorageNode, device: str, mountpoint: str) -> DiskController: - disk_controller_id = DiskController.get_id(node, device) - if disk_controller_id in self.detached_disks.keys(): - disk_controller = self.detached_disks[disk_controller_id] - else: - disk_controller = DiskController(node, device, mountpoint) - - return disk_controller - - @reporter.step("Restore traffic {node}") - def _restore_traffic_to_node(self, node): - IpHelper.restore_input_traffic_to_node(node) - - def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str) -> list[tuple]: - interfaces_and_tables = set() - for node in nodes: - shell = node.host.get_shell() - lines = shell.exec(f"ip r l table all | grep '{name_interface}'").stdout.splitlines() - - ips = [] - tables = [] - - for line in lines: - if "src" not in line or "table local" in line: - continue - parts = line.split() - ips.append(parts[-1]) - if "table" in line: - tables.append(parts[parts.index("table") + 1]) - tables.append(None) - - [interfaces_and_tables.add((ip, table)) for ip, table in itertools.product(ips, tables)] - - return interfaces_and_tables - - @reporter.step("Ping node") - def _ping_host(self, node: ClusterNode): - options = CommandOptions(check=False) - return self.shell.exec(f"ping {node.host.config.address} -c 1", options).return_code - - @retry(max_attempts=60, sleep_interval=10, expected_result=HostStatus.ONLINE, title="Waiting for {node} to go online") - def _wait_for_host_online(self, node: ClusterNode): - try: - ping_result = self._ping_host(node) - if ping_result != 0: - return HostStatus.OFFLINE - return node.host.get_host_status() - except Exception as err: - logger.warning(f"Host ping fails with error {err}") - return HostStatus.OFFLINE - - @retry(max_attempts=60, sleep_interval=10, expected_result=HostStatus.OFFLINE, title="Waiting for {node} to go offline") - def _wait_for_host_offline(self, node: ClusterNode): - try: - ping_result = self._ping_host(node) - if ping_result == 0: - return HostStatus.ONLINE - return node.host.get_host_status() - except Exception as err: - logger.warning(f"Host ping fails with error {err}") - return HostStatus.ONLINE - - @reporter.step("Get contract by domain - {domain_name}") - def get_domain_contracts(self, cluster_node: ClusterNode, domain_name: str): - frostfs_adm = 
FrostfsAdm(shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC) - return frostfs_adm.morph.dump_hashes(cluster_node.morph_chain.get_http_endpoint(), domain_name).stdout diff --git a/src/frostfs_testlib/storage/controllers/disk_controller.py b/src/frostfs_testlib/storage/controllers/disk_controller.py deleted file mode 100644 index c2aa85c..0000000 --- a/src/frostfs_testlib/storage/controllers/disk_controller.py +++ /dev/null @@ -1,41 +0,0 @@ -from frostfs_testlib.hosting.interfaces import DiskInfo -from frostfs_testlib.shell import CommandOptions -from frostfs_testlib.storage.cluster import StorageNode -from frostfs_testlib.testing.test_control import wait_for_success - - -class DiskController: - def __init__(self, node: StorageNode, device: str, mountpoint: str) -> None: - self.node: StorageNode = node - self.device: str = device - self.device_by_label: str - self.mountpoint: str = mountpoint.strip() - self.disk_info: DiskInfo = DiskInfo() - self.id = self.get_id(node, device) - - shell = node.host.get_shell() - cmd = f"sudo udevadm info -n {device} | egrep \"S:.*label\" | awk '{{print $2}}'" - self.device_by_label = f"/dev/{shell.exec(cmd).stdout.strip()}" - - @wait_for_success(60, 3, False) - def _wait_until_detached(self): - return self.node.host.is_disk_attached(self.device, self.disk_info) - - @wait_for_success(60, 3, True) - def _wait_until_attached(self): - return self.node.host.is_disk_attached(self.device, self.disk_info) - - def detach(self): - self.disk_info = self.node.host.detach_disk(self.device) - self._wait_until_detached() - - def attach(self): - self.node.host.attach_disk(self.device, self.disk_info) - self._wait_until_attached() - remote_shell = self.node.host.get_shell() - remote_shell.exec(f"sudo umount -l {self.device}", options=CommandOptions(check=False)) - remote_shell.exec(f"sudo mount {self.device_by_label} {self.mountpoint}") - - @staticmethod - def get_id(node: StorageNode, device: str): - return f"{node.host.config.address} - {device}" diff --git a/src/frostfs_testlib/storage/controllers/shards_watcher.py b/src/frostfs_testlib/storage/controllers/shards_watcher.py deleted file mode 100644 index 5017406..0000000 --- a/src/frostfs_testlib/storage/controllers/shards_watcher.py +++ /dev/null @@ -1,117 +0,0 @@ -import json -from typing import Any - -from frostfs_testlib.cli.frostfs_cli.shards import FrostfsCliShards -from frostfs_testlib.shell.interfaces import CommandResult -from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.testing.test_control import wait_for_success - - -class ShardsWatcher: - def __init__(self, node_under_test: ClusterNode) -> None: - self.shards_snapshots: list[dict[str, Any]] = [] - self.storage_node = node_under_test.storage_node - self.take_shards_snapshot() - - def take_shards_snapshot(self) -> None: - snapshot = self.get_shards_snapshot() - self.shards_snapshots.append(snapshot) - - def get_shards_snapshot(self) -> dict[str, Any]: - shards_snapshot: dict[str, Any] = {} - - shards = self.get_shards() - for shard in shards: - shards_snapshot[shard["shard_id"]] = shard - - return shards_snapshot - - def _get_current_snapshot(self) -> dict[str, Any]: - return self.shards_snapshots[-1] - - def _get_previous_snapshot(self) -> dict[str, Any]: - return self.shards_snapshots[-2] - - def _is_shard_present(self, shard_id) -> bool: - snapshot = self._get_current_snapshot() - return shard_id in snapshot - - def get_shards_with_new_errors(self) -> dict[str, Any]: - current_snapshot = 
self._get_current_snapshot() - previous_snapshot = self._get_previous_snapshot() - shards_with_new_errors: dict[str, Any] = {} - for shard_id, shard in previous_snapshot.items(): - if current_snapshot[shard_id]["error_count"] > shard["error_count"]: - shards_with_new_errors[shard_id] = current_snapshot[shard_id] - - return shards_with_new_errors - - def get_shards_with_errors(self) -> dict[str, Any]: - snapshot = self.get_shards_snapshot() - shards_with_errors: dict[str, Any] = {} - for shard_id, shard in snapshot.items(): - if shard["error_count"] > 0: - shards_with_errors[shard_id] = shard - - return shards_with_errors - - def get_shard_status(self, shard_id: str): # -> Any: - snapshot = self.get_shards_snapshot() - - assert shard_id in snapshot, f"Shard {shard_id} is missing: {snapshot}" - - return snapshot[shard_id]["mode"] - - @wait_for_success(60, 2) - def await_for_all_shards_status(self, status: str) -> None: - snapshot = self.get_shards_snapshot() - - for shard_id in snapshot: - assert snapshot[shard_id]["mode"] == status, f"Shard {shard_id} have wrong shard status" - - @wait_for_success(60, 2) - def await_for_shard_status(self, shard_id: str, status: str) -> None: - assert self.get_shard_status(shard_id) == status - - @wait_for_success(60, 2) - def await_for_shard_have_new_errors(self, shard_id: str) -> None: - self.take_shards_snapshot() - assert self._is_shard_present(shard_id) - shards_with_new_errors = self.get_shards_with_new_errors() - - assert shard_id in shards_with_new_errors, f"Expected shard {shard_id} to have new errors, but haven't {self.shards_snapshots[-1]}" - - @wait_for_success(300, 5) - def await_for_shards_have_no_new_errors(self) -> None: - self.take_shards_snapshot() - shards_with_new_errors = self.get_shards_with_new_errors() - assert len(shards_with_new_errors) == 0 - - def get_shards(self) -> dict[str, Any]: - shards_cli = FrostfsCliShards( - self.storage_node.host.get_shell(), - self.storage_node.host.get_cli_config("frostfs-cli").exec_path, - ) - - response = shards_cli.list( - endpoint=self.storage_node.get_control_endpoint(), - wallet=self.storage_node.get_remote_wallet_path(), - wallet_password=self.storage_node.get_wallet_password(), - json_mode=True, - ) - - return json.loads(response.stdout.split(">", 1)[1]) - - def set_shard_mode(self, shard_id: str, mode: str, clear_errors: bool = True) -> CommandResult: - shards_cli = FrostfsCliShards( - self.storage_node.host.get_shell(), - self.storage_node.host.get_cli_config("frostfs-cli").exec_path, - ) - return shards_cli.set_mode( - endpoint=self.storage_node.get_control_endpoint(), - wallet=self.storage_node.get_remote_wallet_path(), - wallet_password=self.storage_node.get_wallet_password(), - mode=mode, - id=[shard_id], - clear_errors=clear_errors, - ) diff --git a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py deleted file mode 100644 index f0b2a21..0000000 --- a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py +++ /dev/null @@ -1,62 +0,0 @@ -from typing import Any - -from frostfs_testlib import reporter -from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController, StateManager -from frostfs_testlib.storage.dataclasses.node_base import ServiceClass -from frostfs_testlib.testing import parallel - - -class ConfigStateManager(StateManager): - def __init__(self, 
cluster_state_controller: ClusterStateController) -> None: - super().__init__(cluster_state_controller) - self.services_with_changed_config: set[tuple[ClusterNode, ServiceClass]] = set() - self.cluster = self.csc.cluster - - @reporter.step("Change configuration for {service_type} on all nodes") - def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any], sighup: bool = False): - services = self.cluster.services(service_type) - nodes = self.cluster.nodes(services) - self.services_with_changed_config.update([(node, service_type) for node in nodes]) - - if not sighup: - self.csc.stop_services_of_type(service_type) - - parallel([node.config(service_type).set for node in nodes], values=values) - if not sighup: - self.csc.start_services_of_type(service_type) - else: - self.csc.sighup_services_of_type(service_type) - - @reporter.step("Change configuration for {service_type} on {node}") - def set_on_node(self, node: ClusterNode, service_type: type[ServiceClass], values: dict[str, Any]): - self.services_with_changed_config.add((node, service_type)) - - self.csc.stop_service_of_type(node, service_type) - node.config(service_type).set(values) - self.csc.start_service_of_type(node, service_type) - - @reporter.step("Revert all configuration changes") - def revert_all(self, sighup: bool = False): - if not self.services_with_changed_config: - return - - parallel(self._revert_svc, self.services_with_changed_config, sighup) - self.services_with_changed_config.clear() - - if not sighup: - self.csc.start_all_stopped_services() - - # TODO: parallel can't have multiple parallel_items :( - @reporter.step("Revert all configuration {node_and_service}") - def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass], sighup: bool = False): - node, service_type = node_and_service - service = node.service(service_type) - - if not sighup: - self.csc.stop_service_of_type(node, service_type) - - node.config(service_type).revert() - - if sighup: - service.send_signal_to_service("SIGHUP") diff --git a/src/frostfs_testlib/storage/dataclasses/__init__.py b/src/frostfs_testlib/storage/dataclasses/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/frostfs_testlib/storage/dataclasses/acl.py b/src/frostfs_testlib/storage/dataclasses/acl.py deleted file mode 100644 index 362dee9..0000000 --- a/src/frostfs_testlib/storage/dataclasses/acl.py +++ /dev/null @@ -1,100 +0,0 @@ -import logging -from dataclasses import dataclass -from typing import Any, Dict, List, Optional, Union - -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo -from frostfs_testlib.testing.readable import HumanReadableEnum -from frostfs_testlib.utils import wallet_utils - -logger = logging.getLogger("NeoLogger") -EACL_LIFETIME = 100500 -FROSTFS_CONTRACT_CACHE_TIMEOUT = 30 - - -class EACLOperation(HumanReadableEnum): - PUT = "put" - GET = "get" - HEAD = "head" - GET_RANGE = "getrange" - GET_RANGE_HASH = "getrangehash" - SEARCH = "search" - DELETE = "delete" - - -class EACLAccess(HumanReadableEnum): - ALLOW = "allow" - DENY = "deny" - - -class EACLRole(HumanReadableEnum): - OTHERS = "others" - USER = "user" - SYSTEM = "system" - - -class EACLHeaderType(HumanReadableEnum): - REQUEST = "req" # Filter request headers - OBJECT = "obj" # Filter object headers - SERVICE = "SERVICE" # Filter service headers. 
These are not processed by FrostFS nodes and exist for service use only - - -class EACLMatchType(HumanReadableEnum): - STRING_EQUAL = "=" # Return true if strings are equal - STRING_NOT_EQUAL = "!=" # Return true if strings are different - - -@dataclass -class EACLFilter: - header_type: EACLHeaderType = EACLHeaderType.REQUEST - match_type: EACLMatchType = EACLMatchType.STRING_EQUAL - key: Optional[str] = None - value: Optional[str] = None - - def to_dict(self) -> Dict[str, Any]: - return { - "headerType": self.header_type, - "matchType": self.match_type, - "key": self.key, - "value": self.value, - } - - -@dataclass -class EACLFilters: - filters: Optional[List[EACLFilter]] = None - - def __str__(self): - return ",".join( - [f"{filter.header_type.value}:" f"{filter.key}{filter.match_type.value}{filter.value}" for filter in self.filters] - if self.filters - else [] - ) - - -@dataclass -class EACLPubKey: - keys: Optional[List[str]] = None - - -@dataclass -class EACLRule: - operation: Optional[EACLOperation] = None - access: Optional[EACLAccess] = None - role: Optional[Union[EACLRole, WalletInfo]] = None - filters: Optional[EACLFilters] = None - - def to_dict(self) -> Dict[str, Any]: - return { - "Operation": self.operation, - "Access": self.access, - "Role": self.role, - "Filters": self.filters or [], - } - - def __str__(self): - role = "" - if isinstance(self.role, EACLRole): - role = self.role.value - if isinstance(self.role, WalletInfo): - role = f"pubkey:{wallet_utils.get_wallet_public_key(self.role.path, self.role.password)}" - return f'{self.access.value} {self.operation.value} {self.filters or ""} {role}' diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py deleted file mode 100644 index 1199435..0000000 --- a/src/frostfs_testlib/storage/dataclasses/ape.py +++ /dev/null @@ -1,154 +0,0 @@ -import logging -from dataclasses import dataclass -from enum import Enum -from typing import Optional - -from frostfs_testlib.testing.readable import HumanReadableEnum -from frostfs_testlib.utils import string_utils - -logger = logging.getLogger("NeoLogger") -EACL_LIFETIME = 100500 -FROSTFS_CONTRACT_CACHE_TIMEOUT = 30 - - -class ObjectOperations(HumanReadableEnum): - PUT = "object.put" - PATCH = "object.patch" - GET = "object.get" - HEAD = "object.head" - GET_RANGE = "object.range" - GET_RANGE_HASH = "object.hash" - SEARCH = "object.search" - DELETE = "object.delete" - WILDCARD_ALL = "object.*" - - @staticmethod - def get_all(): - return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL] - - -class ContainerOperations(HumanReadableEnum): - PUT = "container.put" - GET = "container.get" - LIST = "container.list" - DELETE = "container.delete" - WILDCARD_ALL = "container.*" - - @staticmethod - def get_all(): - return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL] - - -@dataclass -class Operations: - GET_CONTAINER = "GetContainer" - PUT_CONTAINER = "PutContainer" - DELETE_CONTAINER = "DeleteContainer" - LIST_CONTAINER = "ListContainers" - GET_OBJECT = "GetObject" - DELETE_OBJECT = "DeleteObject" - HASH_OBJECT = "HashObject" - RANGE_OBJECT = "RangeObject" - SEARCH_OBJECT = "SearchObject" - HEAD_OBJECT = "HeadObject" - PUT_OBJECT = "PutObject" - PATCH_OBJECT = "PatchObject" - - -class Verb(HumanReadableEnum): - ALLOW = "allow" - DENY = "deny" - - -class Role(HumanReadableEnum): - OWNER = "owner" - IR = "ir" - CONTAINER = "container" - OTHERS = "others" - - -class ConditionType(HumanReadableEnum): - RESOURCE = 
"ResourceCondition" - REQUEST = "RequestCondition" - - -# See https://git.frostfs.info/TrueCloudLab/policy-engine/src/branch/master/schema/native/consts.go#L40-L53 -class ConditionKey(HumanReadableEnum): - ROLE = '"\\$Actor:role"' - PUBLIC_KEY = '"\\$Actor:publicKey"' - OBJECT_TYPE = '"\\$Object:objectType"' - OBJECT_ID = '"\\$Object:objectID"' - - -class MatchType(HumanReadableEnum): - EQUAL = "=" - NOT_EQUAL = "!=" - - -@dataclass -class Condition: - condition_key: ConditionKey | str - condition_value: str - condition_type: ConditionType = ConditionType.REQUEST - match_type: MatchType = MatchType.EQUAL - - def as_string(self): - key = self.condition_key.value if isinstance(self.condition_key, ConditionKey) else self.condition_key - value = self.condition_value.value if isinstance(self.condition_value, Enum) else self.condition_value - - return f"{self.condition_type.value}:{key}{self.match_type.value}{value}" - - @staticmethod - def by_role(*args, **kwargs) -> "Condition": - return Condition(ConditionKey.ROLE, *args, **kwargs) - - @staticmethod - def by_key(*args, **kwargs) -> "Condition": - return Condition(ConditionKey.PUBLIC_KEY, *args, **kwargs) - - @staticmethod - def by_object_type(*args, **kwargs) -> "Condition": - return Condition(ConditionKey.OBJECT_TYPE, *args, **kwargs) - - @staticmethod - def by_object_id(*args, **kwargs) -> "Condition": - return Condition(ConditionKey.OBJECT_ID, *args, **kwargs) - - -class Rule: - def __init__( - self, - access: Verb, - operations: list[ObjectOperations] | ObjectOperations, - conditions: list[Condition] | Condition = None, - chain_id: Optional[str] = None, - ) -> None: - self.access = access - self.operations = operations - - if not conditions: - self.conditions = [] - elif isinstance(conditions, Condition): - self.conditions = [conditions] - else: - self.conditions = conditions - - if not isinstance(self.conditions, list): - raise RuntimeError("Conditions must be a list") - - if not operations: - self.operations = [] - elif isinstance(operations, (ObjectOperations, ContainerOperations)): - self.operations = [operations] - else: - self.operations = operations - - if not isinstance(self.operations, list): - raise RuntimeError("Operations must be a list") - - self.chain_id = chain_id if chain_id else string_utils.unique_name("chain-id-") - - def as_string(self): - conditions = " ".join([cond.as_string() for cond in self.conditions]) - operations = " ".join([op.value for op in self.operations]) - return f"{self.access.value} {operations} {conditions} *" diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py deleted file mode 100644 index 4f5c348..0000000 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ /dev/null @@ -1,183 +0,0 @@ -import yaml - -from frostfs_testlib.blockchain import RPCClient -from frostfs_testlib.storage.constants import ConfigAttributes -from frostfs_testlib.storage.dataclasses.node_base import NodeBase -from frostfs_testlib.storage.dataclasses.shard import Shard - - -class InnerRing(NodeBase): - """ - Class represents inner ring node in a cluster - - Inner ring node is not always the same as physical host (or physical node, if you will): - It can be service running in a container or on physical host - For testing perspective, it's not relevant how it is actually running, - since frostfs network will still treat it as "node" - """ - - def service_healthcheck(self) -> bool: - health_metric = "frostfs_ir_ir_health" - output = 
self.host.get_shell().exec(f"curl -s localhost:6662 | grep {health_metric} | sed 1,2d").stdout - return health_metric in output - - def get_netmap_cleaner_threshold(self) -> str: - config_file = self.get_remote_config_path() - contents = self.host.get_shell().exec(f"cat {config_file}").stdout - - config = yaml.safe_load(contents) - value = config["netmap_cleaner"]["threshold"] - - return value - - -class S3Gate(NodeBase): - """ - Class represents S3 gateway in a cluster - """ - - def get_endpoint(self) -> str: - return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0) - - def get_ns_endpoint(self, ns_name: str) -> str: - return self._get_attribute(f"{ConfigAttributes.ENDPOINT_DATA_0}_namespace").format(namespace=ns_name) - - def get_all_endpoints(self) -> list[str]: - return [ - self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0), - self._get_attribute(ConfigAttributes.ENDPOINT_DATA_1), - ] - - def get_ns_endpoint(self, ns_name: str) -> str: - return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0_NS).format(namespace=ns_name) - - def service_healthcheck(self) -> bool: - health_metric = "frostfs_s3_gw_state_health" - output = self.host.get_shell().exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d").stdout - return health_metric in output - - @property - def label(self) -> str: - return f"{self.name}: {self.get_endpoint()}" - - -class HTTPGate(NodeBase): - """ - Class represents HTTP gateway in a cluster - """ - - def get_endpoint(self) -> str: - return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0) - - def service_healthcheck(self) -> bool: - health_metric = "frostfs_http_gw_state_health" - output = self.host.get_shell().exec(f"curl -s localhost:5662 | grep {health_metric} | sed 1,2d").stdout - return health_metric in output - - @property - def label(self) -> str: - return f"{self.name}: {self.get_endpoint()}" - - -class MorphChain(NodeBase): - """ - Class represents side-chain aka morph-chain consensus node in a cluster - - Consensus node is not always the same as physical host (or physical node, if you will): - It can be service running in a container or on physical host - For testing perspective, it's not relevant how it is actually running, - since frostfs network will still treat it as "node" - """ - - rpc_client: RPCClient - - def construct(self): - self.rpc_client = RPCClient(self.get_endpoint()) - - def get_endpoint(self) -> str: - return self._get_attribute(ConfigAttributes.ENDPOINT_INTERNAL) - - def service_healthcheck(self) -> bool: - # TODO Rework in 1.3 Release when metrics for each service will be available - return True - - @property - def label(self) -> str: - return f"{self.name}: {self.get_endpoint()}" - - def get_http_endpoint(self) -> str: - return self._get_attribute("http_endpoint") - - -class StorageNode(NodeBase): - """ - Class represents storage node in a storage cluster - - Storage node is not always the same as physical host: - It can be service running in a container or on physical host (or physical node, if you will): - For testing perspective, it's not relevant how it is actually running, - since frostfs network will still treat it as "node" - """ - - def get_rpc_endpoint(self) -> str: - return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0) - - def get_all_rpc_endpoint(self) -> list[str]: - return [ - self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0), - self._get_attribute(ConfigAttributes.ENDPOINT_DATA_1), - ] - - def service_healthcheck(self) -> bool: - health_metric = "frostfs_node_state_health" - output = 
self.host.get_shell().exec(f"curl -s localhost:6672 | grep {health_metric} | sed 1,2d").stdout - return health_metric in output - - # TODO: Deprecated. Use new approach with config - def get_shard_config_path(self) -> str: - return self._get_attribute(ConfigAttributes.SHARD_CONFIG_PATH) - - # TODO: Deprecated. Use new approach with config - def get_shards_config(self) -> tuple[str, dict]: - return self.get_config(self.get_shard_config_path()) - - def get_shards(self) -> list[Shard]: - shards = self.config.get("storage:shard") - - if not shards: - raise RuntimeError(f"Cannot get shards information for {self.name} on {self.host.config.address}") - - if "default" in shards: - shards.pop("default") - return [Shard.from_object(shard) for shard in shards.values()] - - def get_control_endpoint(self) -> str: - return self._get_attribute(ConfigAttributes.CONTROL_ENDPOINT) - - def get_un_locode(self): - return self._get_attribute(ConfigAttributes.UN_LOCODE) - - def get_data_directory(self) -> str: - return self.host.get_data_directory(self.name) - - def delete_blobovnicza(self): - self.host.delete_blobovnicza(self.name) - - def delete_fstree(self): - self.host.delete_fstree(self.name) - - def delete_file(self, file_path: str) -> None: - self.host.delete_file(file_path) - - def is_file_exist(self, file_path) -> bool: - return self.host.is_file_exist(file_path) - - def delete_metabase(self): - self.host.delete_metabase(self.name) - - def delete_write_cache(self): - self.host.delete_write_cache(self.name) - - @property - def label(self) -> str: - return f"{self.name}: {self.get_rpc_endpoint()}" diff --git a/src/frostfs_testlib/storage/dataclasses/metrics.py b/src/frostfs_testlib/storage/dataclasses/metrics.py deleted file mode 100644 index 8969015..0000000 --- a/src/frostfs_testlib/storage/dataclasses/metrics.py +++ /dev/null @@ -1,80 +0,0 @@ -import time -from functools import wraps -from typing import Callable - -import pytest - -from frostfs_testlib.hosting import Host -from frostfs_testlib.shell.interfaces import CommandResult - - -class Metrics: - def __init__(self, host: Host, metrics_endpoint: str) -> None: - self.storage = StorageMetrics(host, metrics_endpoint) - - -class StorageMetrics: - """ - Class represents storage metrics in a cluster - """ - - def __init__(self, host: Host, metrics_endpoint: str) -> None: - self.host = host - self.metrics_endpoint = metrics_endpoint - - def get_metrics_search_by_greps(self, **greps) -> CommandResult: - """ - Get a metrics, search by: cid, metric_type, shard_id etc. - Args: - greps: dict of grep-command-name and value - for example get_metrics_search_by_greps(command='container_objects_total', cid='123456') - Return: - result of metrics - """ - shell = self.host.get_shell() - additional_greps = " |grep ".join([grep_command for grep_command in greps.values()]) - result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {additional_greps}") - return result - - def get_all_metrics(self) -> CommandResult: - shell = self.host.get_shell() - result = shell.exec(f"curl -s {self.metrics_endpoint}") - return result - - -def wait_until_metric_result_is_stable( - relative_deviation: float = None, absolute_deviation: int = None, max_attempts: int = 10, sleep_interval: int = 30 -): - """ - A decorator function that repeatedly calls the decorated function until its result stabilizes - within a specified relative tolerance or until the maximum number of attempts is reached. 
- - This decorator is useful for scenarios where a function returns a metric or value that may fluctuate - over time, and you want to ensure that the result has stabilized before proceeding. - """ - - def decorator(func: Callable): - @wraps(func) - def wrapper(*args, **kwargs): - last_result = None - for _ in range(max_attempts): - # first function call - first_result = func(*args, **kwargs) - - # waiting before the second call - time.sleep(sleep_interval) - - # second function call - last_result = func(*args, **kwargs) - - # checking value stability - if first_result == pytest.approx(last_result, rel=relative_deviation, abs=absolute_deviation): - return last_result - - # if stability is not achieved, return the last value - if last_result is not None: - return last_result - - return wrapper - - return decorator diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py deleted file mode 100644 index 5c8b723..0000000 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ /dev/null @@ -1,224 +0,0 @@ -from abc import abstractmethod -from dataclasses import dataclass -from datetime import datetime, timezone -from typing import Optional, TypedDict, TypeVar - -import yaml -from dateutil import parser - -from frostfs_testlib import reporter -from frostfs_testlib.hosting.config import ServiceConfig -from frostfs_testlib.hosting.interfaces import Host -from frostfs_testlib.shell.interfaces import CommandResult -from frostfs_testlib.storage.configuration.service_configuration import ServiceConfiguration, ServiceConfigurationYml -from frostfs_testlib.storage.constants import ConfigAttributes -from frostfs_testlib.testing.readable import HumanReadableABC -from frostfs_testlib.utils import wallet_utils - - -@dataclass -class NodeBase(HumanReadableABC): - """ - Represents a node of some underlying service - """ - - id: str - name: str - host: Host - _process_name: str - - def __init__(self, id, name, host) -> None: - self.id = id - self.name = name - self.host = host - self.construct() - - def construct(self): - pass - - def __eq__(self, other): - return self.name == other.name - - def __hash__(self): - return id(self.name) - - def __str__(self): - return self.label - - def __repr__(self) -> str: - return self.label - - @property - def label(self) -> str: - return self.name - - def get_service_systemctl_name(self) -> str: - return self._get_attribute(ConfigAttributes.SERVICE_NAME) - - def get_process_name(self) -> str: - return self._process_name - - def start_service(self): - with reporter.step(f"Unmask {self.name} service on {self.host.config.address}"): - self.host.unmask_service(self.name) - - with reporter.step(f"Start {self.name} service on {self.host.config.address}"): - self.host.start_service(self.name) - - def send_signal_to_service(self, signal: str): - with reporter.step(f"Send -{signal} signal to {self.name} service on {self.host.config.address}"): - self.host.send_signal_to_service(self.name, signal) - - @abstractmethod - def service_healthcheck(self) -> bool: - """Service healthcheck.""" - - # TODO: Migrate to sub-class Metrcis (not yet exists :)) - def get_metric(self, metric: str) -> CommandResult: - shell = self.host.get_shell() - result = shell.exec(f"curl -s {self.get_metrics_endpoint()} | grep -e '^{metric}'") - return result - - def get_metrics_endpoint(self) -> str: - return self._get_attribute(ConfigAttributes.ENDPOINT_PROMETHEUS) - - def get_pprof_endpoint(self) -> str: - return 
self._get_attribute(ConfigAttributes.ENDPOINT_PPROF) - - def stop_service(self, mask: bool = True): - if mask: - with reporter.step(f"Mask {self.name} service on {self.host.config.address}"): - self.host.mask_service(self.name) - - with reporter.step(f"Stop {self.name} service on {self.host.config.address}"): - self.host.stop_service(self.name) - - def restart_service(self): - with reporter.step(f"Restart {self.name} service on {self.host.config.address}"): - self.host.restart_service(self.name) - - def get_wallet_password(self) -> str: - return self._get_attribute(ConfigAttributes.WALLET_PASSWORD) - - def get_wallet_path(self) -> str: - return self._get_attribute( - ConfigAttributes.LOCAL_WALLET_PATH, - ConfigAttributes.WALLET_PATH, - ) - - def get_remote_wallet_path(self) -> str: - """ - Returns node wallet file path located on remote host - """ - return self._get_attribute( - ConfigAttributes.WALLET_PATH, - ) - - def get_remote_config_path(self) -> str: - """ - Returns node config file path located on remote host - """ - return self._get_attribute( - ConfigAttributes.CONFIG_PATH, - ) - - def get_remote_wallet_config_path(self) -> str: - """ - Returns node config file path located on remote host - """ - return self._get_attribute( - ConfigAttributes.REMOTE_WALLET_CONFIG, - ) - - def get_wallet_config_path(self) -> str: - return self._get_attribute( - ConfigAttributes.LOCAL_WALLET_CONFIG, - ConfigAttributes.WALLET_CONFIG, - ) - - def get_logger_config_path(self) -> str: - """ - Returns config path for logger located on remote host - """ - config_attributes = self.host.get_service_config(self.name) - return ( - self._get_attribute(ConfigAttributes.LOGGER_CONFIG_PATH) - if ConfigAttributes.LOGGER_CONFIG_PATH in config_attributes.attributes - else None - ) - - def get_working_dir_path(self) -> Optional[str]: - """ - Returns working directory path located on remote host - """ - config_attributes = self.host.get_service_config(self.name) - return self._get_attribute(ConfigAttributes.WORKING_DIR) if ConfigAttributes.WORKING_DIR in config_attributes.attributes else None - - @property - def config_dir(self) -> str: - return self._get_attribute(ConfigAttributes.CONFIG_DIR) - - @property - def main_config_path(self) -> str: - return self._get_attribute(ConfigAttributes.CONFIG_PATH) - - @property - def config(self) -> ServiceConfigurationYml: - return ServiceConfiguration(self.name, self.host.get_shell(), self.config_dir, self.main_config_path) - - # TODO: Deprecated. Use config with ServiceConfigurationYml interface - def get_config(self, config_file_path: Optional[str] = None) -> tuple[str, dict]: - if config_file_path is None: - config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) - - shell = self.host.get_shell() - - result = shell.exec(f"cat {config_file_path}") - config_text = result.stdout - - config = yaml.safe_load(config_text) - return config_file_path, config - - # TODO: Deprecated. 
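
A hedged sketch of the (deprecated) `get_config` flow above, assuming `node` is any NodeBase instance from a configured cluster; the `logger` section is only an example of what such a config may contain.

    config_path, config = node.get_config()
    # `config` is the parsed YAML dict of the remote config file, so nested
    # sections can be read directly.
    log_level = config.get("logger", {}).get("level")
    print(f"{config_path}: logger.level={log_level}")
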
Use config with ServiceConfigurationYml interface - def save_config(self, new_config: dict, config_file_path: Optional[str] = None) -> None: - if config_file_path is None: - config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) - - shell = self.host.get_shell() - - config_str = yaml.dump(new_config) - shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}") - - def get_wallet_public_key(self): - storage_wallet_path = self.get_wallet_path() - storage_wallet_pass = self.get_wallet_password() - return wallet_utils.get_wallet_public_key(storage_wallet_path, storage_wallet_pass) - - def _get_attribute(self, attribute_name: str, default_attribute_name: Optional[str] = None) -> str: - config = self.host.get_service_config(self.name) - - if attribute_name not in config.attributes: - if default_attribute_name is None: - raise RuntimeError(f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either") - - return config.attributes[default_attribute_name] - - return config.attributes[attribute_name] - - def _get_service_config(self) -> ServiceConfig: - return self.host.get_service_config(self.name) - - def get_service_uptime(self, service: str) -> datetime: - result = self.host.get_shell().exec(f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2") - start_time = parser.parse(result.stdout.strip()) - current_time = datetime.now(tz=timezone.utc) - active_time = current_time - start_time - return active_time - - -ServiceClass = TypeVar("ServiceClass", bound=NodeBase) - - -class NodeClassDict(TypedDict): - hosting_service_name: str - cls: type[NodeBase] diff --git a/src/frostfs_testlib/storage/dataclasses/object_size.py b/src/frostfs_testlib/storage/dataclasses/object_size.py deleted file mode 100644 index 0429c78..0000000 --- a/src/frostfs_testlib/storage/dataclasses/object_size.py +++ /dev/null @@ -1,13 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class ObjectSize: - name: str - value: int - - def __str__(self) -> str: - return self.name - - def __repr__(self) -> str: - return self.__str__() diff --git a/src/frostfs_testlib/storage/dataclasses/policy.py b/src/frostfs_testlib/storage/dataclasses/policy.py deleted file mode 100644 index 872ee05..0000000 --- a/src/frostfs_testlib/storage/dataclasses/policy.py +++ /dev/null @@ -1,13 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class PlacementPolicy: - name: str - value: str - - def __str__(self) -> str: - return self.name - - def __repr__(self) -> str: - return self.__str__() diff --git a/src/frostfs_testlib/storage/dataclasses/shard.py b/src/frostfs_testlib/storage/dataclasses/shard.py deleted file mode 100644 index bebdbf5..0000000 --- a/src/frostfs_testlib/storage/dataclasses/shard.py +++ /dev/null @@ -1,92 +0,0 @@ -from dataclasses import dataclass - -from configobj import ConfigObj - -SHARD_PREFIX = "FROSTFS_STORAGE_SHARD_" -BLOBSTOR_PREFIX = "_BLOBSTOR_" - - -@dataclass -class Blobstor: - path: str - path_type: str - - def __eq__(self, other) -> bool: - if not isinstance(other, self.__class__): - raise RuntimeError(f"Only two {self.__class__.__name__} instances can be compared") - return self.path == other.path and self.path_type == other.path_type - - def __hash__(self): - return hash((self.path, self.path_type)) - - @staticmethod - def from_config_object(section: ConfigObj, shard_id: str, blobstor_id: str): - var_prefix = f"{SHARD_PREFIX}{shard_id}{BLOBSTOR_PREFIX}{blobstor_id}" - return Blobstor(section.get(f"{var_prefix}_PATH"), 
section.get(f"{var_prefix}_TYPE")) - - -@dataclass -class Shard: - blobstor: list[Blobstor] - metabase: str - writecache: str - pilorama: str - - def __eq__(self, other) -> bool: - if not isinstance(other, self.__class__): - raise RuntimeError(f"Only two {self.__class__.__name__} instances can be compared") - return ( - set(self.blobstor) == set(other.blobstor) - and self.metabase == other.metabase - and self.writecache == other.writecache - and self.pilorama == other.pilorama - ) - - def __hash__(self): - return hash((self.metabase, self.writecache)) - - @staticmethod - def _get_blobstor_count_from_section(config_object: ConfigObj, shard_id: int): - pattern = f"{SHARD_PREFIX}{shard_id}{BLOBSTOR_PREFIX}" - blobstors = {key[: len(pattern) + 2] for key in config_object.keys() if pattern in key} - return len(blobstors) - - @staticmethod - def from_config_object(config_object: ConfigObj, shard_id: int): - var_prefix = f"{SHARD_PREFIX}{shard_id}" - - blobstor_count = Shard._get_blobstor_count_from_section(config_object, shard_id) - blobstors = [Blobstor.from_config_object(config_object, shard_id, blobstor_id) for blobstor_id in range(blobstor_count)] - - write_cache_enabled = config_object.as_bool(f"{var_prefix}_WRITECACHE_ENABLED") - - return Shard( - blobstors, - config_object.get(f"{var_prefix}_METABASE_PATH"), - config_object.get(f"{var_prefix}_WRITECACHE_PATH") if write_cache_enabled else "", - ) - - @staticmethod - def from_object(shard): - metabase = shard["metabase"]["path"] if "path" in shard["metabase"] else shard["metabase"] - writecache_enabled = True - if "enabled" in shard["writecache"]: - writecache_enabled = shard["writecache"]["enabled"] - - writecache = shard["writecache"]["path"] if "path" in shard["writecache"] else shard["writecache"] - if not writecache_enabled: - writecache = "" - - # Currently due to issue we need to check if pilorama exists in keys - # TODO: make pilorama mandatory after fix - if shard.get("pilorama"): - pilorama = shard["pilorama"]["path"] if "path" in shard["pilorama"] else shard["pilorama"] - else: - pilorama = None - - return Shard( - blobstor=[Blobstor(path=blobstor["path"], path_type=blobstor["type"]) for blobstor in shard["blobstor"]], - metabase=metabase, - writecache=writecache, - pilorama=pilorama, - ) diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py deleted file mode 100644 index 4c303fc..0000000 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ /dev/null @@ -1,127 +0,0 @@ -import re -from dataclasses import dataclass -from typing import Optional - -from pydantic import BaseModel, Field, field_validator - -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo -from frostfs_testlib.testing.readable import HumanReadableEnum - - -@dataclass -class ObjectRef: - cid: str - oid: str - - -@dataclass -class LockObjectInfo(ObjectRef): - lifetime: Optional[int] = None - expire_at: Optional[int] = None - - -@dataclass -class StorageObjectInfo(ObjectRef): - size: Optional[int] = None - wallet: Optional[WalletInfo] = None - file_path: Optional[str] = None - file_hash: Optional[str] = None - attributes: Optional[list[dict[str, str]]] = None - tombstone: Optional[str] = None - locks: Optional[list[LockObjectInfo]] = None - - -class NodeStatus(HumanReadableEnum): - MAINTENANCE: str = "maintenance" - ONLINE: str = "online" - OFFLINE: str = "offline" - - -@dataclass -class NodeNetmapInfo: - node_id: str = None - node_status: NodeStatus 
= None - node_data_ips: list[str] = None - cluster_name: str = None - continent: str = None - country: str = None - country_code: str = None - external_address: list[str] = None - location: str = None - node: str = None - price: int = None - sub_div: str = None - sub_div_code: int = None - un_locode: str = None - role: str = None - - -class Interfaces(HumanReadableEnum): - DATA_O: str = "data0" - DATA_1: str = "data1" - MGMT: str = "mgmt" - INTERNAL_0: str = "internal0" - INTERNAL_1: str = "internal1" - - -@dataclass -class NodeNetInfo: - epoch: str = None - network_magic: str = None - time_per_block: str = None - container_fee: str = None - epoch_duration: str = None - inner_ring_candidate_fee: str = None - maximum_object_size: str = None - maximum_count_of_data_shards: str = None - maximum_count_of_parity_shards: str = None - withdrawal_fee: str = None - homomorphic_hashing_disabled: str = None - maintenance_mode_allowed: str = None - - -class Attributes(BaseModel): - cluster_name: str = Field(alias="ClusterName") - continent: str = Field(alias="Continent") - country: str = Field(alias="Country") - country_code: str = Field(alias="CountryCode") - external_addr: list[str] = Field(alias="ExternalAddr") - location: str = Field(alias="Location") - node: str = Field(alias="Node") - subdiv: str = Field(alias="SubDiv") - subdiv_code: str = Field(alias="SubDivCode") - un_locode: str = Field(alias="UN-LOCODE") - role: str = Field(alias="role") - - @field_validator("external_addr", mode="before") - @classmethod - def convert_external_addr(cls, value: str) -> list[str]: - return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", value)] - - -class NodeInfo(BaseModel): - public_key: str = Field(alias="publicKey") - addresses: list[str] = Field(alias="addresses") - state: str = Field(alias="state") - attributes: Attributes = Field(alias="attributes") - - @field_validator("addresses", mode="before") - @classmethod - def convert_external_addr(cls, value: str) -> list[str]: - return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", ",".join(value))] - - -@dataclass -class Chunk: - def __init__(self, object_id: str, required_nodes: list, confirmed_nodes: list, ec_parent_object_id: str, ec_index: int) -> None: - self.object_id = object_id - self.required_nodes = required_nodes - self.confirmed_nodes = confirmed_nodes - self.ec_parent_object_id = ec_parent_object_id - self.ec_index = ec_index - - def __str__(self) -> str: - return self.object_id - - def __repr__(self) -> str: - return self.object_id diff --git a/src/frostfs_testlib/storage/dataclasses/wallet.py b/src/frostfs_testlib/storage/dataclasses/wallet.py deleted file mode 100644 index d053d29..0000000 --- a/src/frostfs_testlib/storage/dataclasses/wallet.py +++ /dev/null @@ -1,91 +0,0 @@ -import json -import logging -import os -from dataclasses import dataclass -from typing import Optional - -import yaml - -from frostfs_testlib import reporter -from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG, DEFAULT_WALLET_PASS -from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.cluster import NodeBase -from frostfs_testlib.utils.wallet_utils import get_last_address_from_wallet, init_wallet - -logger = logging.getLogger("frostfs.testlib.utils") - - -@dataclass -class WalletInfo: - path: str - password: str = DEFAULT_WALLET_PASS - config_path: str = DEFAULT_WALLET_CONFIG - - @staticmethod - def from_node(node: NodeBase): - wallet_path = node.get_wallet_path() 
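
A standalone sketch of the multiaddr conversion performed by the `field_validator` above: the regex pulls the IPv4 address and port out of each `/ip4/.../tcp/...` (or `/tls/...`) entry and re-joins them as `ip:port`. The sample address is illustrative.

    import re

    def multiaddrs_to_host_ports(value: str) -> list[str]:
        return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", value)]

    assert multiaddrs_to_host_ports("/ip4/192.168.130.71/tcp/8080") == ["192.168.130.71:8080"]
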
- wallet_password = node.get_wallet_password() - wallet_config_file = os.path.join(ASSETS_DIR, os.path.basename(node.get_wallet_config_path())) - with open(wallet_config_file, "w") as file: - file.write(yaml.dump({"wallet": wallet_path, "password": wallet_password})) - - return WalletInfo(wallet_path, wallet_password, wallet_config_file) - - def get_address(self) -> str: - """ - Extracts the last address from wallet via neo3 lib. - - Returns: - The address of the wallet. - """ - return get_last_address_from_wallet(self.path, self.password) - - def get_address_from_json(self, account_id: int = 0) -> str: - """ - Extracts address of the given account id from wallet using json lookup. - (Useful if neo3 fails for some reason and can't be used). - - Args: - account_id: id of the account to get address. - - Returns: - address string. - """ - with open(self.path, "r") as wallet: - wallet_json = json.load(wallet) - assert abs(account_id) + 1 <= len(wallet_json["accounts"]), f"There is no index '{account_id}' in wallet: {wallet_json}" - - return wallet_json["accounts"][account_id]["address"] - - -class WalletFactory: - def __init__(self, wallets_dir: str, shell: Shell) -> None: - self.shell = shell - self.wallets_dir = wallets_dir - - def create_wallet(self, file_name: str, password: Optional[str] = None) -> WalletInfo: - """ - Creates new default wallet. - - Args: - file_name: output wallet file name. - password: wallet password. - - Returns: - WalletInfo object of new wallet. - """ - - if password is None: - password = "" - - base_path = os.path.join(self.wallets_dir, file_name) - wallet_path = f"{base_path}.json" - wallet_config_path = f"{base_path}.yaml" - init_wallet(wallet_path, password) - - with open(wallet_config_path, "w") as config_file: - config_file.write(f'wallet: {wallet_path}\npassword: "{password}"') - - reporter.attach(wallet_path, os.path.basename(wallet_path)) - - return WalletInfo(wallet_path, password, wallet_config_path) diff --git a/src/frostfs_testlib/storage/grpc_operations/__init__.py b/src/frostfs_testlib/storage/grpc_operations/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py deleted file mode 100644 index d9f94b2..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py +++ /dev/null @@ -1,15 +0,0 @@ -from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.storage.grpc_operations import implementations, interfaces, interfaces_wrapper - - -class CliClientWrapper(interfaces_wrapper.GrpcClientWrapper): - def __init__(self, cli: FrostfsCli) -> None: - self.cli = cli - self.object: interfaces.ObjectInterface = implementations.ObjectOperations(self.cli) - self.container: interfaces.ContainerInterface = implementations.ContainerOperations(self.cli) - self.netmap: interfaces.NetmapInterface = implementations.NetmapOperations(self.cli) - self.ape_manager: interfaces.ApeManagerInterface = implementations.ApeManagerOperations(self.cli) - - -class RpcClientWrapper(interfaces_wrapper.GrpcClientWrapper): - pass # The next series diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py b/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py deleted file mode 100644 index df820fa..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .ape_manager import ApeManagerOperations -from 
.chunks import ChunksOperations -from .container import ContainerOperations -from .netmap import NetmapOperations -from .object import ObjectOperations diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/ape_manager.py b/src/frostfs_testlib/storage/grpc_operations/implementations/ape_manager.py deleted file mode 100644 index 070d8a6..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/ape_manager.py +++ /dev/null @@ -1,79 +0,0 @@ -from typing import Optional - -from frostfs_testlib import reporter -from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT - - -class ApeManagerOperations: - def __init__(self, cli: FrostfsCli): - self.cli = cli - - @reporter.step("Add ape rule") - def add( - self, - rpc_endpoint: str, - chain_id: Optional[str] = None, - chain_id_hex: Optional[str] = None, - path: Optional[str] = None, - rule: Optional[str] | Optional[list[str]] = None, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ): - return self.cli.ape_manager.add( - rpc_endpoint=rpc_endpoint, - chain_id=chain_id, - chain_id_hex=chain_id_hex, - path=path, - rule=rule, - target_name=target_name, - target_type=target_type, - wallet=wallet, - address=address, - timeout=timeout, - ) - - @reporter.step("Get list APE rules") - def list( - self, - rpc_endpoint: str, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ): - return self.cli.ape_manager.list( - rpc_endpoint=rpc_endpoint, - target_name=target_name, - target_type=target_type, - wallet=wallet, - address=address, - timeout=timeout, - ) - - @reporter.step("Remove APE rule") - def remove( - self, - rpc_endpoint: str, - chain_id: Optional[str] = None, - chain_id_hex: Optional[str] = None, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ): - return self.cli.ape_manager.remove( - rpc_endpoint=rpc_endpoint, - chain_id=chain_id, - chain_id_hex=chain_id_hex, - target_name=target_name, - target_type=target_type, - wallet=wallet, - address=address, - timeout=timeout, - ) diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py deleted file mode 100644 index 0d787e2..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py +++ /dev/null @@ -1,165 +0,0 @@ -import json -from typing import Optional - -from frostfs_testlib import reporter -from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher -from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, Interfaces, NodeNetmapInfo -from frostfs_testlib.storage.grpc_operations import interfaces -from frostfs_testlib.testing.test_control import wait_for_success -from frostfs_testlib.utils.cli_utils import parse_netmap_output - - -class ChunksOperations(interfaces.ChunksInterface): - def __init__(self, cli: FrostfsCli) -> None: - self.cli = cli - - 
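
A hedged usage sketch for the ApeManagerOperations wrapper above, assuming a configured FrostfsCli instance and a reachable endpoint; the container ID, endpoint and target type value are placeholders for illustration.

    from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
    from frostfs_testlib.storage.dataclasses.ape import ObjectOperations, Rule, Verb
    from frostfs_testlib.storage.grpc_operations.implementations import ApeManagerOperations

    def allow_get_for_everyone(cli: FrostfsCli, rpc_endpoint: str, cid: str) -> None:
        ape_manager = ApeManagerOperations(cli)
        rule = Rule(Verb.ALLOW, ObjectOperations.GET)
        # rule.as_string() -> "allow object.get  *" (empty conditions leave a double space)
        ape_manager.add(
            rpc_endpoint=rpc_endpoint,
            rule=rule.as_string(),
            target_name=cid,
            target_type="container",
        )
        # List the chains now attached to the container.
        ape_manager.list(rpc_endpoint=rpc_endpoint, target_name=cid, target_type="container")
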
@reporter.step("Search node without chunks") - def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]: - if not endpoint: - endpoint = cluster.default_rpc_endpoint - netmap = parse_netmap_output(self.cli.netmap.snapshot(endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout) - chunks_node_key = [] - for chunk in chunks: - chunks_node_key.extend(chunk.confirmed_nodes) - for node_info in netmap.copy(): - if node_info.node_id in chunks_node_key and node_info in netmap: - netmap.remove(node_info) - result = [] - for node_info in netmap: - for cluster_node in cluster.cluster_nodes: - if node_info.node == cluster_node.get_interface(Interfaces.MGMT): - result.append(cluster_node) - return result - - @reporter.step("Search node with chunk {chunk}") - def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]: - netmap = parse_netmap_output(self.cli.netmap.snapshot(cluster.default_rpc_endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout) - for node_info in netmap: - if node_info.node_id in chunk.confirmed_nodes: - for cluster_node in cluster.cluster_nodes: - if cluster_node.get_interface(Interfaces.MGMT) == node_info.node: - return (cluster_node, node_info) - - @wait_for_success(300, 5, fail_testcase=None) - @reporter.step("Search shard with chunk {chunk}") - def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: - oid_path = f"{chunk.object_id[0]}/{chunk.object_id[1]}/{chunk.object_id[2]}/{chunk.object_id[3]}" - node_shell = node.storage_node.host.get_shell() - shards_watcher = ShardsWatcher(node) - - with reporter.step("Search object file"): - for shard_id, shard_info in shards_watcher.shards_snapshots[-1].items(): - check_dir = node_shell.exec(f" [ -d {shard_info['blobstor'][1]['path']}/{oid_path} ] && echo 1 || echo 0").stdout - if "1" in check_dir.strip(): - return shard_id - - @reporter.step("Get all chunks") - def get_all( - self, - rpc_endpoint: str, - cid: str, - oid: str, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = True, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> list[Chunk]: - object_nodes = self.cli.object.nodes( - rpc_endpoint=rpc_endpoint, - cid=cid, - address=address, - bearer=bearer, - generate_key=generate_key, - oid=oid, - trace=trace, - root=root, - verify_presence_all=verify_presence_all, - json=json, - ttl=ttl, - xhdr=xhdr, - timeout=timeout, - ) - return self._parse_object_nodes(object_nodes.stdout.split("\n")[0]) - - @reporter.step("Get last parity chunk") - def get_parity( - self, - rpc_endpoint: str, - cid: str, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - oid: Optional[str] = None, - trace: bool = True, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> Chunk: - object_nodes = self.cli.object.nodes( - rpc_endpoint=rpc_endpoint, - cid=cid, - address=address, - bearer=bearer, - generate_key=generate_key, - oid=oid, - trace=trace, - root=root, - verify_presence_all=verify_presence_all, - json=json, - ttl=ttl, - xhdr=xhdr, - timeout=timeout, - ) - return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[-1] - - @reporter.step("Get first data chunk") - def get_first_data( - 
self, - rpc_endpoint: str, - cid: str, - oid: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = True, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> Chunk: - object_nodes = self.cli.object.nodes( - rpc_endpoint=rpc_endpoint, - cid=cid, - address=address, - bearer=bearer, - generate_key=generate_key, - oid=oid, - trace=trace, - root=root, - verify_presence_all=verify_presence_all, - json=json, - ttl=ttl, - xhdr=xhdr, - timeout=timeout, - ) - return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0] - - def _parse_object_nodes(self, object_nodes: str) -> list[Chunk]: - parse_result = json.loads(object_nodes) - if parse_result.get("errors"): - raise RuntimeError(", ".join(parse_result["errors"])) - return [Chunk(**chunk) for chunk in parse_result["data_objects"]] diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py deleted file mode 100644 index afdf6cb..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py +++ /dev/null @@ -1,338 +0,0 @@ -import json -import logging -import re -from time import sleep -from typing import List, Optional, Union - -from frostfs_testlib import reporter -from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.clients.s3 import BucketContainerResolver -from frostfs_testlib.plugins import load_plugin -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.grpc_operations import interfaces -from frostfs_testlib.utils import json_utils - -logger = logging.getLogger("NeoLogger") - - -class ContainerOperations(interfaces.ContainerInterface): - def __init__(self, cli: FrostfsCli) -> None: - self.cli = cli - - @reporter.step("Create Container") - def create( - self, - endpoint: str, - nns_zone: Optional[str] = None, - nns_name: Optional[str] = None, - address: Optional[str] = None, - attributes: Optional[dict] = None, - basic_acl: Optional[str] = None, - await_mode: bool = False, - disable_timestamp: bool = False, - force: bool = False, - trace: bool = False, - name: Optional[str] = None, - nonce: Optional[str] = None, - policy: Optional[str] = None, - session: Optional[str] = None, - subnet: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> str: - """ - A wrapper for `frostfs-cli container create` call. 
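
A standalone sketch of what `_parse_object_nodes` above does with the `frostfs-cli object nodes` JSON: fail on reported errors, otherwise build `Chunk` objects. The JSON document below is a made-up minimal example shaped to match the parser, not real CLI output.

    import json

    from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk

    sample = json.dumps(
        {
            "data_objects": [
                {
                    "object_id": "objectid1",
                    "required_nodes": ["pubkey1", "pubkey2"],
                    "confirmed_nodes": ["pubkey1"],
                    "ec_parent_object_id": None,
                    "ec_index": None,
                }
            ],
            "errors": [],
        }
    )

    parse_result = json.loads(sample)
    if parse_result.get("errors"):
        raise RuntimeError(", ".join(parse_result["errors"]))
    chunks = [Chunk(**chunk) for chunk in parse_result["data_objects"]]
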
- - Args: - wallet (WalletInfo): a wallet on whose behalf a container is created - rule (optional, str): placement rule for container - basic_acl (optional, str): an ACL for container, will be - appended to `--basic-acl` key - attributes (optional, dict): container attributes , will be - appended to `--attributes` key - session_token (optional, str): a path to session token file - session_wallet(optional, str): a path to the wallet which signed - the session token; this parameter makes sense - when paired with `session_token` - shell: executor for cli command - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - options (optional, dict): any other options to pass to the call - name (optional, str): container name attribute - await_mode (bool): block execution until container is persisted - wait_for_creation (): Wait for container shows in container list - timeout: Timeout for the operation. - - Returns: - (str): CID of the created container - """ - result = self.cli.container.create( - rpc_endpoint=endpoint, - policy=policy, - nns_zone=nns_zone, - nns_name=nns_name, - address=address, - attributes=attributes, - basic_acl=basic_acl, - await_mode=await_mode, - disable_timestamp=disable_timestamp, - force=force, - trace=trace, - name=name, - nonce=nonce, - session=session, - subnet=subnet, - ttl=ttl, - xhdr=xhdr, - timeout=timeout, - ) - - cid = self._parse_cid(result.stdout) - - logger.info("Container created; waiting until it is persisted in the sidechain") - - return cid - - @reporter.step("List Containers") - def list( - self, - endpoint: str, - name: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - owner: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - **params, - ) -> List[str]: - """ - A wrapper for `frostfs-cli container list` call. It returns all the - available containers for the given wallet. - Args: - shell: executor for cli command - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - timeout: Timeout for the operation. - Returns: - (list): list of containers - """ - result = self.cli.container.list( - rpc_endpoint=endpoint, - name=name, - address=address, - generate_key=generate_key, - owner=owner, - ttl=ttl, - xhdr=xhdr, - timeout=timeout, - **params, - ) - return result.stdout.split() - - @reporter.step("List Objects in container") - def list_objects( - self, - endpoint: str, - cid: str, - bearer: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> List[str]: - """ - A wrapper for `frostfs-cli container list-objects` call. It returns all the - available objects in container. - Args: - container_id: cid of container - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - timeout: Timeout for the operation. 
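
A hedged end-to-end sketch for the container wrapper above, assuming a configured FrostfsCli and a reachable RPC endpoint; the placement policy string is only an example.

    from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
    from frostfs_testlib.storage.grpc_operations.implementations import ContainerOperations

    def create_and_verify_container(cli: FrostfsCli, endpoint: str) -> str:
        containers = ContainerOperations(cli)
        cid = containers.create(endpoint, policy="REP 2 IN X CBF 1 SELECT 2 FROM * AS X", await_mode=True)
        # Poll `container list` until the new CID shows up.
        containers.wait_creation(cid, endpoint)
        assert cid in containers.list(endpoint)
        return cid
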
- Returns: - (list): list of containers - """ - result = self.cli.container.list_objects( - rpc_endpoint=endpoint, - cid=cid, - bearer=bearer, - wallet=wallet, - address=address, - generate_key=generate_key, - trace=trace, - ttl=ttl, - xhdr=xhdr, - timeout=timeout, - ) - logger.info(f"Container objects: \n{result}") - return result.stdout.split() - - @reporter.step("Delete container") - def delete( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - await_mode: bool = False, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - force: bool = False, - trace: bool = False, - ): - return self.cli.container.delete( - rpc_endpoint=endpoint, - cid=cid, - address=address, - await_mode=await_mode, - session=session, - ttl=ttl, - xhdr=xhdr, - force=force, - trace=trace, - ).stdout - - @reporter.step("Get container") - def get( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - await_mode: bool = False, - to: Optional[str] = None, - json_mode: bool = True, - trace: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> Union[dict, str]: - result = self.cli.container.get( - rpc_endpoint=endpoint, - cid=cid, - address=address, - generate_key=generate_key, - await_mode=await_mode, - to=to, - json_mode=json_mode, - trace=trace, - ttl=ttl, - xhdr=xhdr, - timeout=timeout, - ) - container_info = json.loads(result.stdout) - attributes = dict() - for attr in container_info["attributes"]: - attributes[attr["key"]] = attr["value"] - container_info["attributes"] = attributes - container_info["ownerID"] = json_utils.json_reencode(container_info["ownerID"]["value"]) - return container_info - - @reporter.step("Get eacl container") - def get_eacl( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - await_mode: bool = False, - json_mode: bool = True, - trace: bool = False, - to: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ): - return self.cli.container.get_eacl( - rpc_endpoint=endpoint, - cid=cid, - address=address, - generate_key=generate_key, - await_mode=await_mode, - to=to, - session=session, - ttl=ttl, - xhdr=xhdr, - timeout=CLI_DEFAULT_TIMEOUT, - ).stdout - - @reporter.step("Get nodes container") - def nodes( - self, - endpoint: str, - cid: str, - cluster: Cluster, - address: Optional[str] = None, - ttl: Optional[int] = None, - from_file: Optional[str] = None, - trace: bool = False, - short: Optional[bool] = True, - xhdr: Optional[dict] = None, - generate_key: Optional[bool] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> List[ClusterNode]: - result = self.cli.container.search_node( - rpc_endpoint=endpoint, - cid=cid, - address=address, - ttl=ttl, - from_file=from_file, - trace=trace, - short=short, - xhdr=xhdr, - generate_key=generate_key, - timeout=timeout, - ).stdout - - pattern = r"[0-9]+(?:\.[0-9]+){3}" - nodes_ip = list(set(re.findall(pattern, result))) - - with reporter.step(f"nodes ips = {nodes_ip}"): - nodes_list = cluster.get_nodes_by_ip(nodes_ip) - - with reporter.step(f"Return nodes - {nodes_list}"): - return nodes_list - - @reporter.step("Resolve container by name") - def resolve_container_by_name(name: str, node: ClusterNode): - resolver_cls = load_plugin("frostfs.testlib.bucket_cid_resolver", 
node.host.config.product) - resolver: BucketContainerResolver = resolver_cls() - return resolver.resolve(node, name) - - @reporter.step("Wait create container, with list") - def wait_creation(self, cid: str, endpoint: str, attempts: int = 15, sleep_interval: int = 1): - for _ in range(attempts): - containers = self.list(endpoint) - if cid in containers: - return - logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue") - sleep(sleep_interval) - raise RuntimeError(f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting") - - def _parse_cid(self, output: str) -> str: - """ - Parses container ID from a given CLI output. The input string we expect: - container ID: 2tz86kVTDpJxWHrhw3h6PbKMwkLtBEwoqhHQCKTre1FN - awaiting... - container has been persisted on sidechain - We want to take 'container ID' value from the string. - - Args: - output (str): CLI output to parse - - Returns: - (str): extracted CID - """ - try: - # taking first line from command's output - first_line = output.split("\n")[0] - except Exception: - first_line = "" - logger.error(f"Got empty output: {output}") - splitted = first_line.split(": ") - if len(splitted) != 2: - raise ValueError(f"no CID was parsed from command output: \t{first_line}") - return splitted[1] diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py b/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py deleted file mode 100644 index 76ee69a..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py +++ /dev/null @@ -1,171 +0,0 @@ -import json as module_json -from typing import List, Optional - -from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.cli.netmap_parser import NetmapParser -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeInfo, NodeNetInfo, NodeNetmapInfo - -from .. import interfaces - - -class NetmapOperations(interfaces.NetmapInterface): - def __init__(self, cli: FrostfsCli) -> None: - self.cli = cli - - def epoch( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = True, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> int: - """ - Get current epoch number. - """ - output = ( - self.cli.netmap.epoch( - rpc_endpoint=rpc_endpoint, - wallet=wallet, - address=address, - generate_key=generate_key, - ttl=ttl, - trace=trace, - xhdr=xhdr, - timeout=timeout, - ) - .stdout.split("Trace ID")[0] - .strip() - ) - - return int(output) - - def netinfo( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = True, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> NodeNetInfo: - """ - Get target node info. 
- """ - output = ( - self.cli.netmap.netinfo( - rpc_endpoint=rpc_endpoint, - wallet=wallet, - address=address, - generate_key=generate_key, - ttl=ttl, - trace=trace, - xhdr=xhdr, - timeout=timeout, - ) - .stdout.split("Trace ID")[0] - .strip() - ) - - return NetmapParser.netinfo(output) - - def nodeinfo( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - json: bool = True, - ttl: Optional[int] = None, - trace: Optional[bool] = True, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> NodeInfo: - """ - Get target node info. - """ - output = ( - self.cli.netmap.nodeinfo( - rpc_endpoint=rpc_endpoint, - wallet=wallet, - address=address, - generate_key=generate_key, - json=json, - ttl=ttl, - trace=trace, - xhdr=xhdr, - timeout=timeout, - ) - .stdout.split("Trace ID")[0] - .strip() - ) - - return NetmapParser.node_info(module_json.loads(output)) - - def snapshot( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = True, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> List[NodeNetmapInfo]: - """ - Get target node info. - """ - output = ( - self.cli.netmap.snapshot( - rpc_endpoint=rpc_endpoint, - wallet=wallet, - address=address, - generate_key=generate_key, - ttl=ttl, - trace=trace, - xhdr=xhdr, - timeout=timeout, - ) - .stdout.split("Trace ID")[0] - .strip() - ) - - return NetmapParser.snapshot_all_nodes(output) - - def snapshot_one_node( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = True, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> List[NodeNetmapInfo]: - """ - Get target one node info. 
- """ - output = ( - self.cli.netmap.snapshot( - rpc_endpoint=rpc_endpoint, - wallet=wallet, - address=address, - generate_key=generate_key, - ttl=ttl, - trace=trace, - xhdr=xhdr, - timeout=timeout, - ) - .stdout.split("Trace ID")[0] - .strip() - ) - - return NetmapParser.snapshot_one_node(output, rpc_endpoint) diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py deleted file mode 100644 index be8a470..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py +++ /dev/null @@ -1,708 +0,0 @@ -import json -import logging -import os -import re -import uuid -from typing import Any, Optional - -from frostfs_testlib import reporter, utils -from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.resources.common import ASSETS_DIR -from frostfs_testlib.shell.interfaces import CommandResult -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces -from frostfs_testlib.storage.grpc_operations import interfaces -from frostfs_testlib.storage.grpc_operations.implementations.chunks import ChunksOperations -from frostfs_testlib.testing.test_control import wait_for_success -from frostfs_testlib.utils import cli_utils, file_utils - -logger = logging.getLogger("NeoLogger") - - -class ObjectOperations(interfaces.ObjectInterface): - def __init__(self, cli: FrostfsCli) -> None: - self.cli = cli - self.chunks: interfaces.ChunksInterface = ChunksOperations(self.cli) - - @reporter.step("Delete object") - def delete( - self, - cid: str, - oid: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> str: - """ - DELETE an Object. - - Args: - cid: ID of Container where we get the Object from - oid: ID of Object we are going to delete - bearer: path to Bearer Token file, appends to `--bearer` key - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - xhdr: Request X-Headers in form of Key=Value - session: path to a JSON-encoded container session token - timeout: Timeout for the operation. - Returns: - (str): Tombstone ID - """ - result = self.cli.object.delete( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - bearer=bearer, - xhdr=xhdr, - session=session, - timeout=timeout, - ) - - id_str = result.stdout.split("\n")[1] - tombstone = id_str.split(":")[1] - return tombstone.strip() - - @reporter.step("Get object") - def get( - self, - cid: str, - oid: str, - endpoint: str, - bearer: Optional[str] = None, - write_object: Optional[str] = None, - xhdr: Optional[dict] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> file_utils.TestFile: - """ - GET from FrostFS. - - Args: - cid (str): ID of Container where we get the Object from - oid (str): Object ID - bearer: path to Bearer Token file, appends to `--bearer` key - write_object: path to downloaded file, appends to `--file` key - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - no_progress(optional, bool): do not show progress bar - xhdr (optional, dict): Request X-Headers in form of Key=Value - session (optional, dict): path to a JSON-encoded container session token - timeout: Timeout for the operation. 
- Returns: - (str): path to downloaded file - """ - if not write_object: - write_object = str(uuid.uuid4()) - test_file = file_utils.TestFile(os.path.join(ASSETS_DIR, write_object)) - - self.cli.object.get( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - file=test_file, - bearer=bearer, - no_progress=no_progress, - xhdr=xhdr, - session=session, - timeout=timeout, - ) - - return test_file - - @reporter.step("Get object from random node") - def get_from_random_node( - self, - cid: str, - oid: str, - cluster: Cluster, - bearer: Optional[str] = None, - write_object: Optional[str] = None, - xhdr: Optional[dict] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> str: - """ - GET from FrostFS random storage node - - Args: - cid: ID of Container where we get the Object from - oid: Object ID - cluster: cluster object - bearer (optional, str): path to Bearer Token file, appends to `--bearer` key - write_object (optional, str): path to downloaded file, appends to `--file` key - no_progress(optional, bool): do not show progress bar - xhdr (optional, dict): Request X-Headers in form of Key=Value - session (optional, dict): path to a JSON-encoded container session token - timeout: Timeout for the operation. - Returns: - (str): path to downloaded file - """ - endpoint = cluster.get_random_storage_rpc_endpoint() - return self.get( - cid, - oid, - endpoint, - bearer, - write_object, - xhdr, - no_progress, - session, - timeout, - ) - - @reporter.step("Get hash object") - def hash( - self, - rpc_endpoint: str, - cid: str, - oid: str, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - range: Optional[str] = None, - salt: Optional[str] = None, - ttl: Optional[int] = None, - session: Optional[str] = None, - hash_type: Optional[str] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> str: - """ - Get object hash. - - Args: - address: Address of wallet account. - bearer: File with signed JSON or binary encoded bearer token. - cid: Container ID. - generate_key: Generate new private key. - oid: Object ID. - range: Range to take hash from in the form offset1:length1,... - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - salt: Salt in hex format. - ttl: TTL value in request meta header (default 2). - session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session. - hash_type: Hash type. Either 'sha256' or 'tz' (default "sha256"). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for the operation (default 15s). - - Returns: - Command's result. - """ - result = self.cli.object.hash( - rpc_endpoint=rpc_endpoint, - cid=cid, - oid=oid, - address=address, - bearer=bearer, - generate_key=generate_key, - range=range, - salt=salt, - ttl=ttl, - xhdr=xhdr, - session=session, - hash_type=hash_type, - timeout=timeout, - ) - - if range: - # Cut off the range and return only hash - return result.stdout.split(":")[1].strip() - - return result.stdout - - @reporter.step("Head object") - def head( - self, - cid: str, - oid: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - json_output: bool = True, - is_raw: bool = False, - is_direct: bool = False, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> CommandResult | Any: - """ - HEAD an Object. 
- - Args: - cid (str): ID of Container where we get the Object from - oid (str): ObjectID to HEAD - bearer (optional, str): path to Bearer Token file, appends to `--bearer` key - endpoint(optional, str): FrostFS endpoint to send request to - json_output(optional, bool): return response in JSON format or not; this flag - turns into `--json` key - is_raw(optional, bool): send "raw" request or not; this flag - turns into `--raw` key - is_direct(optional, bool): send request directly to the node or not; this flag - turns into `--ttl 1` key - xhdr (optional, dict): Request X-Headers in form of Key=Value - session (optional, dict): path to a JSON-encoded container session token - timeout: Timeout for the operation. - Returns: - depending on the `json_output` parameter value, the function returns - (dict): HEAD response in JSON format - or - (str): HEAD response as a plain text - """ - result = self.cli.object.head( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - bearer=bearer, - json_mode=json_output, - raw=is_raw, - ttl=1 if is_direct else None, - xhdr=xhdr, - session=session, - timeout=timeout, - ) - - if not json_output: - return result - - try: - decoded = json.loads(result.stdout) - except Exception as exc: - # If we failed to parse output as JSON, the cause might be - # the plain text string in the beginning of the output. - # Here we cut off first string and try to parse again. - logger.info(f"failed to parse output: {exc}") - logger.info("parsing output in another way") - fst_line_idx = result.stdout.find("\n") - decoded = json.loads(result.stdout[fst_line_idx:]) - - # if response - if "chunks" in decoded.keys(): - logger.info("decoding ec chunks") - return decoded["chunks"] - - # If response is Complex Object header, it has `splitId` key - if "splitId" in decoded.keys(): - logger.info("decoding split header") - return utils.json_utils.decode_split_header(decoded) - - # If response is Last or Linking Object header, - # it has `header` dictionary and non-null `split` dictionary - if "split" in decoded["header"].keys(): - if decoded["header"]["split"]: - logger.info("decoding linking object") - return utils.json_utils.decode_linking_object(decoded) - - if decoded["header"]["objectType"] == "STORAGE_GROUP": - logger.info("decoding storage group") - return utils.json_utils.decode_storage_group(decoded) - - if decoded["header"]["objectType"] == "TOMBSTONE": - logger.info("decoding tombstone") - return utils.json_utils.decode_tombstone(decoded) - - logger.info("decoding simple header") - return utils.json_utils.decode_simple_header(decoded) - - @reporter.step("Lock Object") - def lock( - self, - cid: str, - oid: str, - endpoint: str, - lifetime: Optional[int] = None, - expire_at: Optional[int] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> str: - """ - Locks object in container. - - Args: - address: Address of wallet account. - bearer: File with signed JSON or binary encoded bearer token. - cid: Container ID. - oid: Object ID. - lifetime: Lock lifetime. - expire_at: Lock expiration epoch. - shell: executor for cli command - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - session: Path to a JSON-encoded container session token. - ttl: TTL value in request meta header (default 2). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. 
- timeout: Timeout for the operation. - - Returns: - Lock object ID - """ - result = self.cli.object.lock( - rpc_endpoint=endpoint, - lifetime=lifetime, - expire_at=expire_at, - address=address, - cid=cid, - oid=oid, - bearer=bearer, - xhdr=xhdr, - session=session, - ttl=ttl, - timeout=timeout, - ) - - # Splitting CLI output to separate lines and taking the penultimate line - id_str = result.stdout.strip().split("\n")[0] - oid = id_str.split(":")[1] - return oid.strip() - - @reporter.step("Put object") - def put( - self, - path: str, - cid: str, - endpoint: str, - bearer: Optional[str] = None, - copies_number: Optional[int] = None, - attributes: Optional[dict] = None, - xhdr: Optional[dict] = None, - expire_at: Optional[int] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> str: - """ - PUT of given file. - - Args: - path: path to file to be PUT - cid: ID of Container where we get the Object from - bearer: path to Bearer Token file, appends to `--bearer` key - copies_number: Number of copies of the object to store within the RPC call - attributes: User attributes in form of Key1=Value1,Key2=Value2 - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - no_progress: do not show progress bar - expire_at: Last epoch in the life of the object - xhdr: Request X-Headers in form of Key=Value - session: path to a JSON-encoded container session token - timeout: Timeout for the operation. - Returns: - (str): ID of uploaded Object - """ - result = self.cli.object.put( - rpc_endpoint=endpoint, - file=path, - cid=cid, - attributes=attributes, - bearer=bearer, - copies_number=copies_number, - expire_at=expire_at, - no_progress=no_progress, - xhdr=xhdr, - session=session, - timeout=timeout, - ) - - # Splitting CLI output to separate lines and taking the penultimate line - id_str = result.stdout.strip().split("\n")[-2] - oid = id_str.split(":")[1] - return oid.strip() - - @reporter.step("Patch object") - def patch( - self, - cid: str, - oid: str, - endpoint: str, - ranges: list[str] = None, - payloads: list[str] = None, - new_attrs: Optional[str] = None, - replace_attrs: bool = False, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - trace: bool = False, - ) -> str: - """ - PATCH an object. 
- - Args: - cid: ID of Container where we get the Object from - oid: Object ID - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - ranges: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2] - payloads: An array of file paths to be applied in each range - new_attrs: Attributes to be changed in the format "key1=value1,key2=value2" - replace_attrs: Replace all attributes completely with new ones specified in new_attrs - bearer: Path to Bearer Token file, appends to `--bearer` key - xhdr: Request X-Headers in form of Key=Value - session: Path to a JSON-encoded container session token - timeout: Timeout for the operation - trace: Generate trace ID and print it - Returns: - (str): ID of patched Object - """ - result = self.cli.object.patch( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - range=ranges, - payload=payloads, - new_attrs=new_attrs, - replace_attrs=replace_attrs, - bearer=bearer, - xhdr=xhdr, - session=session, - timeout=timeout, - trace=trace, - ) - return result.stdout.split(":")[1].strip() - - @reporter.step("Put object to random node") - def put_to_random_node( - self, - path: str, - cid: str, - cluster: Cluster, - bearer: Optional[str] = None, - copies_number: Optional[int] = None, - attributes: Optional[dict] = None, - xhdr: Optional[dict] = None, - expire_at: Optional[int] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> str: - """ - PUT of given file to a random storage node. - - Args: - path: path to file to be PUT - cid: ID of Container where we get the Object from - cluster: cluster under test - bearer: path to Bearer Token file, appends to `--bearer` key - copies_number: Number of copies of the object to store within the RPC call - attributes: User attributes in form of Key1=Value1,Key2=Value2 - cluster: cluster under test - no_progress: do not show progress bar - expire_at: Last epoch in the life of the object - xhdr: Request X-Headers in form of Key=Value - session: path to a JSON-encoded container session token - timeout: Timeout for the operation. - Returns: - ID of uploaded Object - """ - endpoint = cluster.get_random_storage_rpc_endpoint() - return self.put( - path, - cid, - endpoint, - bearer, - copies_number, - attributes, - xhdr, - expire_at, - no_progress, - session, - timeout=timeout, - ) - - @reporter.step("Get Range") - def range( - self, - cid: str, - oid: str, - range_cut: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> tuple[file_utils.TestFile, bytes]: - """ - GETRANGE an Object. - - Args: - wallet: wallet on whose behalf GETRANGE is done - cid: ID of Container where we get the Object from - oid: ID of Object we are going to request - range_cut: range to take data from in the form offset:length - shell: executor for cli command - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - bearer: path to Bearer Token file, appends to `--bearer` key - xhdr: Request X-Headers in form of Key=Value - session: path to a JSON-encoded container session token - timeout: Timeout for the operation. 
- Returns: - (str, bytes) - path to the file with range content and content of this file as bytes - """ - test_file = file_utils.TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4()))) - - self.cli.object.range( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - range=range_cut, - file=test_file, - bearer=bearer, - xhdr=xhdr, - session=session, - timeout=timeout, - ) - - with open(test_file, "rb") as file: - content = file.read() - return test_file, content - - @reporter.step("Search object") - def search( - self, - cid: str, - endpoint: str, - bearer: str = "", - oid: Optional[str] = None, - filters: Optional[dict] = None, - expected_objects_list: Optional[list] = None, - xhdr: Optional[dict] = None, - session: Optional[str] = None, - phy: bool = False, - root: bool = False, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - ttl: Optional[int] = None, - ) -> list: - """ - SEARCH an Object. - - Args: - wallet: wallet on whose behalf SEARCH is done - cid: ID of Container where we get the Object from - shell: executor for cli command - bearer: path to Bearer Token file, appends to `--bearer` key - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - filters: key=value pairs to filter Objects - expected_objects_list: a list of ObjectIDs to compare found Objects with - xhdr: Request X-Headers in form of Key=Value - session: path to a JSON-encoded container session token - phy: Search physically stored objects. - root: Search for user objects. - timeout: Timeout for the operation. - - Returns: - list of found ObjectIDs - """ - result = self.cli.object.search( - rpc_endpoint=endpoint, - cid=cid, - bearer=bearer, - oid=oid, - xhdr=xhdr, - filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None, - session=session, - phy=phy, - root=root, - address=address, - generate_key=generate_key, - ttl=ttl, - timeout=timeout, - ) - - found_objects = re.findall(r"(\w{43,44})", result.stdout) - - if expected_objects_list: - if sorted(found_objects) == sorted(expected_objects_list): - logger.info(f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'") - else: - logger.warning(f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'") - - return found_objects - - @wait_for_success() - @reporter.step("Search object nodes") - def nodes( - self, - cluster: Cluster, - cid: str, - oid: str, - alive_node: ClusterNode, - bearer: str = "", - xhdr: Optional[dict] = None, - is_direct: bool = False, - verify_presence_all: bool = False, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> list[ClusterNode]: - endpoint = alive_node.storage_node.get_rpc_endpoint() - - response = self.cli.object.nodes( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - bearer=bearer, - ttl=1 if is_direct else None, - json=True, - xhdr=xhdr, - timeout=timeout, - verify_presence_all=verify_presence_all, - ) - - response_json = json.loads(response.stdout) - # Currently, the command will show expected and confirmed nodes. 
- # And we (currently) count only nodes which are both expected and confirmed - object_nodes_id = { - required_node - for data_object in response_json["data_objects"] - for required_node in data_object["required_nodes"] - if required_node in data_object["confirmed_nodes"] - } - - netmap_nodes_list = cli_utils.parse_netmap_output( - self.cli.netmap.snapshot( - rpc_endpoint=endpoint, - ).stdout - ) - netmap_nodes = [ - netmap_node for object_node in object_nodes_id for netmap_node in netmap_nodes_list if object_node == netmap_node.node_id - ] - - object_nodes = [ - cluster_node - for netmap_node in netmap_nodes - for cluster_node in cluster.cluster_nodes - if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT) - ] - - return object_nodes - - @reporter.step("Search parts of object") - def parts( - self, - cid: str, - oid: str, - alive_node: ClusterNode, - bearer: str = "", - xhdr: Optional[dict] = None, - is_direct: bool = False, - verify_presence_all: bool = False, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> list[str]: - endpoint = alive_node.storage_node.get_rpc_endpoint() - response = self.cli.object.nodes( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - bearer=bearer, - ttl=1 if is_direct else None, - json=True, - xhdr=xhdr, - timeout=timeout, - verify_presence_all=verify_presence_all, - ) - response_json = json.loads(response.stdout) - return [data_object["object_id"] for data_object in response_json["data_objects"]] diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py deleted file mode 100644 index 379bbe0..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .ape_manager import ApeManagerInterface -from .chunks import ChunksInterface -from .container import ContainerInterface -from .netmap import NetmapInterface -from .object import ObjectInterface diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/ape_manager.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/ape_manager.py deleted file mode 100644 index 5b198bc..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/ape_manager.py +++ /dev/null @@ -1,48 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Optional - -from frostfs_testlib.shell.interfaces import CommandResult - - -class ApeManagerInterface(ABC): - @abstractmethod - def add( - self, - rpc_endpoint: str, - chain_id: Optional[str] = None, - chain_id_hex: Optional[str] = None, - path: Optional[str] = None, - rule: Optional[str] | Optional[list[str]] = None, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - pass - - @abstractmethod - def list( - self, - rpc_endpoint: str, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - pass - - @abstractmethod - def remove( - self, - rpc_endpoint: str, - chain_id: Optional[str] = None, - chain_id_hex: Optional[str] = None, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py 
b/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py deleted file mode 100644 index 986b938..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py +++ /dev/null @@ -1,79 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Optional - -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo - - -class ChunksInterface(ABC): - @abstractmethod - def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]: - pass - - @abstractmethod - def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]: - pass - - @abstractmethod - def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: - pass - - @abstractmethod - def get_all( - self, - rpc_endpoint: str, - cid: str, - oid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> list[Chunk]: - pass - - @abstractmethod - def get_parity( - self, - rpc_endpoint: str, - cid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - oid: Optional[str] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> Chunk: - pass - - @abstractmethod - def get_first_data( - self, - rpc_endpoint: str, - cid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - oid: Optional[str] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> Chunk: - pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py deleted file mode 100644 index 397f7b2..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py +++ /dev/null @@ -1,129 +0,0 @@ -from abc import ABC, abstractmethod -from typing import List, Optional - -from frostfs_testlib.storage.cluster import Cluster, ClusterNode - - -class ContainerInterface(ABC): - @abstractmethod - def create( - self, - endpoint: str, - nns_zone: Optional[str] = None, - nns_name: Optional[str] = None, - address: Optional[str] = None, - attributes: Optional[dict] = None, - basic_acl: Optional[str] = None, - await_mode: bool = False, - disable_timestamp: bool = False, - force: bool = False, - trace: bool = False, - name: Optional[str] = None, - nonce: Optional[str] = None, - policy: Optional[str] = None, - session: Optional[str] = None, - subnet: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> str: - """ - Create a new container and register it in the FrostFS. - It will be stored in the sidechain when the Inner Ring accepts it. 
- """ - raise NotImplementedError("No implemethed method create") - - @abstractmethod - def delete( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - await_mode: bool = False, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - force: bool = False, - trace: bool = False, - ) -> List[str]: - """ - Delete an existing container. - Only the owner of the container has permission to remove the container. - """ - raise NotImplementedError("No implemethed method delete") - - @abstractmethod - def get( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - await_mode: bool = False, - to: Optional[str] = None, - json_mode: bool = True, - trace: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[str]: - """Get container field info.""" - raise NotImplementedError("No implemethed method get") - - @abstractmethod - def get_eacl( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - await_mode: bool = False, - json_mode: bool = True, - trace: bool = False, - to: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[str]: - """Get extended ACL table of container.""" - raise NotImplementedError("No implemethed method get-eacl") - - @abstractmethod - def list( - self, - endpoint: str, - name: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = False, - owner: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - **params, - ) -> List[str]: - """List all created containers.""" - raise NotImplementedError("No implemethed method list") - - @abstractmethod - def nodes( - self, - endpoint: str, - cid: str, - cluster: Cluster, - address: Optional[str] = None, - ttl: Optional[int] = None, - from_file: Optional[str] = None, - trace: bool = False, - short: Optional[bool] = True, - xhdr: Optional[dict] = None, - generate_key: Optional[bool] = None, - timeout: Optional[str] = None, - ) -> List[ClusterNode]: - """Show the nodes participating in the container in the current epoch.""" - raise NotImplementedError("No implemethed method nodes") - - @abstractmethod - def wait_creation(self, cid: str, endpoint: str, attempts: Optional[str], sleep_interval: Optional[int]) -> None: - raise NotImplementedError("No implemented method wait_creation") diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py deleted file mode 100644 index 3fdc98a..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py +++ /dev/null @@ -1,89 +0,0 @@ -from abc import ABC, abstractmethod -from typing import List, Optional - -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeInfo, NodeNetInfo, NodeNetmapInfo - - -class NetmapInterface(ABC): - @abstractmethod - def epoch( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = False, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> int: - """ - Get current epoch number. 
- """ - raise NotImplementedError("No implemethed method epoch") - - @abstractmethod - def netinfo( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> NodeNetInfo: - """ - Get target node info. - """ - raise NotImplementedError("No implemethed method netinfo") - - @abstractmethod - def nodeinfo( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> NodeInfo: - """ - Get target node info. - """ - raise NotImplementedError("No implemethed method nodeinfo") - - @abstractmethod - def snapshot( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[NodeNetmapInfo]: - """ - Get target node info. - """ - raise NotImplementedError("No implemethed method snapshot") - - @abstractmethod - def snapshot_one_node( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[NodeNetmapInfo]: - """ - Get target one node info. - """ - raise NotImplementedError("No implemethed method snapshot") diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py deleted file mode 100644 index 550c461..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py +++ /dev/null @@ -1,223 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Any, List, Optional - -from frostfs_testlib.shell.interfaces import CommandResult -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.utils import file_utils - -from .chunks import ChunksInterface - - -class ObjectInterface(ABC): - def __init__(self) -> None: - self.chunks: ChunksInterface - - @abstractmethod - def delete( - self, - cid: str, - oid: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def get( - self, - cid: str, - oid: str, - endpoint: str, - bearer: Optional[str] = None, - write_object: Optional[str] = None, - xhdr: Optional[dict] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> file_utils.TestFile: - pass - - @abstractmethod - def get_from_random_node( - self, - cid: str, - oid: str, - cluster: Cluster, - bearer: Optional[str] = None, - write_object: Optional[str] = None, - xhdr: Optional[dict] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def hash( - self, - endpoint: str, - cid: str, - oid: str, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - range: Optional[str] = None, - salt: Optional[str] = None, - ttl: Optional[int] = None, - session: Optional[str] = None, - hash_type: Optional[str] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> 
str: - pass - - @abstractmethod - def head( - self, - cid: str, - oid: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - json_output: bool = True, - is_raw: bool = False, - is_direct: bool = False, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult | Any: - pass - - @abstractmethod - def lock( - self, - cid: str, - oid: str, - endpoint: str, - lifetime: Optional[int] = None, - expire_at: Optional[int] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def put( - self, - path: str, - cid: str, - endpoint: str, - bearer: Optional[str] = None, - copies_number: Optional[int] = None, - attributes: Optional[dict] = None, - xhdr: Optional[dict] = None, - expire_at: Optional[int] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def patch( - self, - cid: str, - oid: str, - endpoint: str, - ranges: Optional[list[str]] = None, - payloads: Optional[list[str]] = None, - new_attrs: Optional[str] = None, - replace_attrs: bool = False, - bearer: Optional[str] = None, - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - trace: bool = False, - ) -> str: - pass - - @abstractmethod - def put_to_random_node( - self, - path: str, - cid: str, - cluster: Cluster, - bearer: Optional[str] = None, - copies_number: Optional[int] = None, - attributes: Optional[dict] = None, - xhdr: Optional[dict] = None, - expire_at: Optional[int] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def range( - self, - cid: str, - oid: str, - range_cut: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> tuple[file_utils.TestFile, bytes]: - pass - - @abstractmethod - def search( - self, - cid: str, - endpoint: str, - bearer: str = "", - oid: Optional[str] = None, - filters: Optional[dict] = None, - expected_objects_list: Optional[list] = None, - xhdr: Optional[dict] = None, - session: Optional[str] = None, - phy: bool = False, - root: bool = False, - timeout: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - ttl: Optional[int] = None, - ) -> List: - pass - - @abstractmethod - def nodes( - self, - cluster: Cluster, - cid: str, - oid: str, - alive_node: ClusterNode, - bearer: str = "", - xhdr: Optional[dict] = None, - is_direct: bool = False, - verify_presence_all: bool = False, - timeout: Optional[str] = None, - ) -> List[ClusterNode]: - pass - - @abstractmethod - def parts( - self, - cid: str, - oid: str, - alive_node: ClusterNode, - bearer: str = "", - xhdr: Optional[dict] = None, - is_direct: bool = False, - verify_presence_all: bool = False, - timeout: Optional[str] = None, - ) -> List[str]: - pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py b/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py deleted file mode 100644 index 5edc99f..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py +++ /dev/null @@ -1,14 +0,0 @@ -from abc import ABC - -from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli - -from . 
import interfaces - - -class GrpcClientWrapper(ABC): - def __init__(self) -> None: - self.cli: FrostfsCli - self.object: interfaces.ObjectInterface - self.container: interfaces.ContainerInterface - self.netmap: interfaces.NetmapInterface - self.ape_manager: interfaces.ApeManagerInterface diff --git a/src/frostfs_testlib/storage/service_registry.py b/src/frostfs_testlib/storage/service_registry.py deleted file mode 100644 index 3154dc7..0000000 --- a/src/frostfs_testlib/storage/service_registry.py +++ /dev/null @@ -1,21 +0,0 @@ -from frostfs_testlib.storage.dataclasses.node_base import NodeBase, NodeClassDict, ServiceClass - - -class ServiceRegistry: - _class_mapping: dict[str, NodeClassDict] = {} - - def get_entry(self, service_type: type[ServiceClass]) -> NodeClassDict: - key = service_type.__name__ - - if key not in self._class_mapping: - raise RuntimeError( - f"Unregistered service type requested: {key}. At this moment registered services are: {self._class_mapping.keys()}" - ) - - return self._class_mapping[key] - - def register_service(self, service_name: str, service_class: type[NodeBase]): - self._class_mapping[service_class.__name__] = { - "cls": service_class, - "hosting_service_name": service_name, - } diff --git a/src/frostfs_testlib/testing/__init__.py b/src/frostfs_testlib/testing/__init__.py deleted file mode 100644 index 3483972..0000000 --- a/src/frostfs_testlib/testing/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from frostfs_testlib.testing.parallel import parallel -from frostfs_testlib.testing.test_control import expect_not_raises, run_optionally, wait_for_success diff --git a/src/frostfs_testlib/testing/cluster_test_base.py b/src/frostfs_testlib/testing/cluster_test_base.py deleted file mode 100644 index 50c8eb6..0000000 --- a/src/frostfs_testlib/testing/cluster_test_base.py +++ /dev/null @@ -1,43 +0,0 @@ -import time -from typing import Optional - -from frostfs_testlib import reporter -from frostfs_testlib.resources.common import MORPH_BLOCK_TIME -from frostfs_testlib.shell import Shell -from frostfs_testlib.steps import epoch -from frostfs_testlib.storage.cluster import Cluster -from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode -from frostfs_testlib.utils import datetime_utils - - -# To skip adding every mandatory singleton dependency to EACH test function -class ClusterTestBase: - shell: Shell - cluster: Cluster - - @reporter.step("Tick {epochs_to_tick} epochs, wait {wait_block} block") - def tick_epochs( - self, - epochs_to_tick: int, - alive_node: Optional[StorageNode] = None, - wait_block: int = None, - ): - for _ in range(epochs_to_tick): - self.tick_epoch(alive_node, wait_block) - - def tick_epoch(self, alive_node: Optional[StorageNode] = None, wait_block: int = None, delta: Optional[int] = None): - epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node, delta=delta) - if wait_block: - self.wait_for_blocks(wait_block) - - def wait_for_epochs_align(self): - epoch.wait_for_epochs_align(self.shell, self.cluster) - - def get_epoch(self): - return epoch.get_epoch(self.shell, self.cluster) - - def ensure_fresh_epoch(self): - return epoch.ensure_fresh_epoch(self.shell, self.cluster) - - def wait_for_blocks(self, blocks_count: int = 1): - time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * blocks_count) diff --git a/src/frostfs_testlib/testing/parallel.py b/src/frostfs_testlib/testing/parallel.py deleted file mode 100644 index 6c4f6e0..0000000 --- a/src/frostfs_testlib/testing/parallel.py +++ /dev/null @@ -1,148 +0,0 @@ -import 
itertools -import traceback -from concurrent.futures import Future, ThreadPoolExecutor -from contextlib import contextmanager -from typing import Callable, Collection, Optional, Union - -MAX_WORKERS = 50 - - -@contextmanager -def parallel_workers_limit(workers_count: int): - global MAX_WORKERS - original_value = MAX_WORKERS - MAX_WORKERS = workers_count - try: - yield - finally: - MAX_WORKERS = original_value - - -def parallel( - fn: Union[Callable, list[Callable]], - parallel_items: Optional[Collection] = None, - *args, - **kwargs, -) -> list[Future]: - """Parallel execution of selected function or list of function using ThreadPoolExecutor. - Also checks the exceptions of each thread. - - Args: - fn: function(s) to run. Can work in 2 modes: - 1. If you have dedicated function with some items to process in parallel, - like you do with executor.map(fn, parallel_items), pass this function as fn. - 2. If you need to process each item with it's own method, like you do - with executor.submit(fn, args, kwargs), pass list of methods here. - See examples in runners.py in this repo. - parallel_items: items to iterate on (should be None in case of 2nd mode). - args: any other args required in target function(s). - if any arg is itertool.cycle, it will be iterated before passing to new thread. - kwargs: any other kwargs required in target function(s) - if any kwarg is itertool.cycle, it will be iterated before passing to new thread. - - Returns: - list of futures. - """ - - if callable(fn): - if not parallel_items: - raise RuntimeError("Parallel items should not be none when fn is callable.") - futures = _run_by_items(fn, parallel_items, *args, **kwargs) - elif isinstance(fn, list): - futures = _run_by_fn_list(fn, *args, **kwargs) - else: - raise RuntimeError("Nothing to run. fn should be either callable or list of callables.") - - # Check for exceptions - exceptions = [future.exception() for future in futures if future.exception()] - if exceptions: - # Prettify exception in parallel with all underlying stack traces - # For example, we had 3 RuntimeError exceptions during parallel. 
This format will give us something like - # - # RuntimeError: The following exceptions occured during parallel run: - # 1) Exception one text - # 2) Exception two text - # 3) Exception three text - # TRACES: - # ==== 1 ==== - # Traceback (most recent call last): - # File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run - # result = self.fn(*self.args, **self.kwargs) - # File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service - # raise RuntimeError(f"Exception one text") - # RuntimeError: Exception one text - # - # ==== 2 ==== - # Traceback (most recent call last): - # File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run - # result = self.fn(*self.args, **self.kwargs) - # File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service - # raise RuntimeError(f"Exception two text") - # RuntimeError: Exception two text - # - # ==== 3 ==== - # Traceback (most recent call last): - # File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run - # result = self.fn(*self.args, **self.kwargs) - # File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service - # raise RuntimeError(f"Exception three text") - # RuntimeError: Exception three text - short_summary = "\n".join([f"{i}) {str(e)}" for i, e in enumerate(exceptions, 1)]) - stack_traces = "\n".join( - [f"==== {i} ====\n{''.join(traceback.TracebackException.from_exception(e).format())}" for i, e in enumerate(exceptions, 1)] - ) - message = f"{short_summary}\nTRACES:\n{stack_traces}" - raise RuntimeError(f"The following exceptions occured during parallel run:\n{message}") - return futures - - -def _run_by_fn_list(fn_list: list[Callable], *args, **kwargs) -> list[Future]: - if not len(fn_list): - return [] - if not all([callable(f) for f in fn_list]): - raise RuntimeError("fn_list should contain only callables") - - futures: list[Future] = [] - - with ThreadPoolExecutor(max_workers=min(len(fn_list), MAX_WORKERS)) as executor: - for fn in fn_list: - task_args = _get_args(*args) - task_kwargs = _get_kwargs(**kwargs) - - futures.append(executor.submit(fn, *task_args, **task_kwargs)) - - return futures - - -def _run_by_items(fn: Callable, parallel_items: Collection, *args, **kwargs) -> list[Future]: - futures: list[Future] = [] - - with ThreadPoolExecutor(max_workers=min(len(parallel_items), MAX_WORKERS)) as executor: - for item in parallel_items: - task_args = _get_args(*args) - task_kwargs = _get_kwargs(**kwargs) - task_args.insert(0, item) - - futures.append(executor.submit(fn, *task_args, **task_kwargs)) - - return futures - - -def _get_kwargs(**kwargs): - actkwargs = {} - for key, arg in kwargs.items(): - if isinstance(arg, itertools.cycle): - actkwargs[key] = next(arg) - else: - actkwargs[key] = arg - return actkwargs - - -def _get_args(*args): - actargs = [] - for arg in args: - if isinstance(arg, itertools.cycle): - actargs.append(next(arg)) - else: - actargs.append(arg) - return actargs diff --git a/src/frostfs_testlib/testing/readable.py b/src/frostfs_testlib/testing/readable.py deleted file mode 100644 index 80f1169..0000000 --- a/src/frostfs_testlib/testing/readable.py +++ /dev/null @@ -1,36 +0,0 @@ -from abc import ABCMeta -from enum import Enum - - -class HumanReadableEnum(Enum): - def __str__(self): - return self._name_ - - def __repr__(self): - return self._name_ - - -class HumanReadableABCMeta(ABCMeta): - def __str__(cls): - if "__repr_name__" 
in cls.__dict__: - return cls.__dict__["__repr_name__"] - return cls.__name__ - - def __repr__(cls): - if "__repr_name__" in cls.__dict__: - return cls.__dict__["__repr_name__"] - return cls.__name__ - - -class HumanReadableABC(metaclass=HumanReadableABCMeta): - @classmethod - def __str__(cls): - if "__repr_name__" in cls.__dict__: - return cls.__dict__["__repr_name__"] - return type(cls).__name__ - - @classmethod - def __repr__(cls): - if "__repr_name__" in cls.__dict__: - return cls.__dict__["__repr_name__"] - return type(cls).__name__ diff --git a/src/frostfs_testlib/testing/test_control.py b/src/frostfs_testlib/testing/test_control.py deleted file mode 100644 index bc38208..0000000 --- a/src/frostfs_testlib/testing/test_control.py +++ /dev/null @@ -1,222 +0,0 @@ -import inspect -import logging -import os -from functools import wraps -from time import sleep, time -from typing import Any - -import yaml -from _pytest.outcomes import Failed -from pytest import fail - -from frostfs_testlib import reporter -from frostfs_testlib.resources.common import ASSETS_DIR -from frostfs_testlib.utils.func_utils import format_by_args - -logger = logging.getLogger("NeoLogger") - -# TODO: we may consider deprecating some methods here and use tenacity instead - - -class expect_not_raises: - """ - Decorator/Context manager check that some action, method or test does not raise exceptions - - Useful to set proper state of failed test cases in allure - - Example: - def do_stuff(): - raise Exception("Fail") - - def test_yellow(): <- this test is marked yellow (Test Defect) in allure - do_stuff() - - def test_red(): <- this test is marked red (Failed) in allure - with expect_not_raises(): - do_stuff() - - @expect_not_raises() - def test_also_red(): <- this test is also marked red (Failed) in allure - do_stuff() - """ - - def __enter__(self): - pass - - def __exit__(self, exception_type, exception_value, exception_traceback): - if exception_value: - fail(str(exception_value)) - - def __call__(self, func): - @wraps(func) - def impl(*a, **kw): - with expect_not_raises(): - func(*a, **kw) - - return impl - - -def retry(max_attempts: int, sleep_interval: int = 1, expected_result: Any = None, title: str = None): - """ - Decorator to wait for some conditions/functions to pass successfully. - This is useful if you don't know exact time when something should pass successfully and do not - want to use sleep(X) with too big X. - - Be careful though, wrapped function should only check the state of something, not change it. - """ - - assert max_attempts >= 1, "Cannot apply retry decorator with max_attempts < 1" - - def wrapper(func): - def call(func, *a, **kw): - last_exception = None - for _ in range(max_attempts): - try: - actual_result = func(*a, **kw) - if expected_result is not None: - assert expected_result == actual_result - return actual_result - except Exception as ex: - logger.debug(ex) - last_exception = ex - sleep(sleep_interval) - except Failed as ex: - logger.debug(ex) - last_exception = ex - sleep(sleep_interval) - - # timeout exceeded with no success, raise last_exception - if last_exception is not None: - raise last_exception - - @wraps(func) - def impl(*a, **kw): - if title is not None: - with reporter.step(format_by_args(func, title, *a, **kw)): - return call(func, *a, **kw) - - return call(func, *a, **kw) - - return impl - - return wrapper - - -def run_optionally(enabled: bool, mock_value: Any = True): - """ - Decorator to run something conditionally. 
- MUST be placed after @pytest.fixture and before @allure decorators. - - Args: - enabled: if true, decorated func will be called as usual. if false the decorated func will be skipped and mock_value will be returned. - mock_value: the value to be returned when decorated func is skipped. - """ - - def deco(func): - @wraps(func) - def func_impl(*a, **kw): - if enabled: - return func(*a, **kw) - return mock_value - - @wraps(func) - def gen_impl(*a, **kw): - if enabled: - yield from func(*a, **kw) - return - yield mock_value - - return gen_impl if inspect.isgeneratorfunction(func) else func_impl - - return deco - - -def cached_fixture(enabled: bool): - """ - Decorator to cache fixtures. - MUST be placed after @pytest.fixture and before @allure decorators. - - Args: - enabled: if true, decorated func will be cached. - """ - - def deco(func): - @wraps(func) - def func_impl(*a, **kw): - # TODO: *a and *kw should be parsed to some kind of hashsum and used in filename to prevent cache load from different parameters - cache_file = os.path.join(ASSETS_DIR, f"fixture_cache_{func.__name__}.yml") - - if enabled and os.path.exists(cache_file): - with open(cache_file, "r") as cache_input: - return yaml.load(cache_input, Loader=yaml.Loader) - - result = func(*a, **kw) - - if enabled: - with open(cache_file, "w") as cache_output: - yaml.dump(result, cache_output) - return result - - # TODO: cache yielding fixtures - @wraps(func) - def gen_impl(*a, **kw): - raise NotImplementedError("Not implemented for yielding fixtures") - - return gen_impl if inspect.isgeneratorfunction(func) else func_impl - - return deco - - -def wait_for_success( - max_wait_time: int = 60, - interval: int = 1, - expected_result: Any = None, - fail_testcase: bool = False, - fail_message: str = "", - title: str = None, -): - """ - Decorator to wait for some conditions/functions to pass successfully. - This is useful if you don't know exact time when something should pass successfully and do not - want to use sleep(X) with too big X. - - Be careful though, wrapped function should only check the state of something, not change it. - """ - - def wrapper(func): - def call(func, *a, **kw): - start = int(round(time())) - last_exception = None - while start + max_wait_time >= int(round(time())): - try: - actual_result = func(*a, **kw) - if expected_result is not None: - assert expected_result == actual_result, fail_message - return actual_result - except Exception as ex: - logger.debug(ex) - last_exception = ex - sleep(interval) - except Failed as ex: - logger.debug(ex) - last_exception = ex - sleep(interval) - - if fail_testcase: - fail(str(last_exception)) - - # timeout exceeded with no success, raise last_exception - if last_exception is not None: - raise last_exception - - @wraps(func) - def impl(*a, **kw): - if title is not None: - with reporter.step(format_by_args(func, title, *a, **kw)): - return call(func, *a, **kw) - - return call(func, *a, **kw) - - return impl - - return wrapper diff --git a/src/frostfs_testlib/utils/__init__.py b/src/frostfs_testlib/utils/__init__.py deleted file mode 100644 index 4acc5b1..0000000 --- a/src/frostfs_testlib/utils/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -""" -Idea of utils is to have small utilitary functions which are not dependent of anything. 
-""" - -import frostfs_testlib.utils.converting_utils -import frostfs_testlib.utils.datetime_utils -import frostfs_testlib.utils.json_utils -import frostfs_testlib.utils.string_utils -import frostfs_testlib.utils.wallet_utils diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py deleted file mode 100644 index 8787296..0000000 --- a/src/frostfs_testlib/utils/cli_utils.py +++ /dev/null @@ -1,209 +0,0 @@ -#!/usr/bin/python3.10 - -# TODO: This file is deprecated and all code which uses these calls should be refactored to use shell classes - -""" -Helper functions to use with `frostfs-cli`, `neo-go` and other CLIs. -""" -import csv -import json -import logging -import re -import sys -from contextlib import suppress -from datetime import datetime -from io import StringIO -from textwrap import shorten -from typing import Any, Optional, Union - -import pexpect - -from frostfs_testlib import reporter -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetmapInfo - -logger = logging.getLogger("NeoLogger") -COLOR_GREEN = "\033[92m" -COLOR_OFF = "\033[0m" - - -def _run_with_passwd(cmd: str) -> str: - child = pexpect.spawn(cmd) - child.delaybeforesend = 1 - child.expect(".*") - child.sendline("\r") - if sys.platform == "darwin": - child.expect(pexpect.EOF) - cmd = child.before - else: - child.wait() - cmd = child.read() - return cmd.decode() - - -def _configure_aws_cli(cmd: str, key_id: str, access_key: str, region: str, out_format: str = "json") -> str: - child = pexpect.spawn(cmd) - child.delaybeforesend = 1 - - child.expect("AWS Access Key ID.*") - child.sendline(key_id) - - child.expect("AWS Secret Access Key.*") - child.sendline(access_key) - - child.expect("Default region name.*") - child.sendline("region") - - child.expect("Default output format.*") - child.sendline(out_format) - - child.wait() - cmd = child.read() - # child.expect(pexpect.EOF) - # cmd = child.before - return cmd.decode() - - -def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: datetime, end_time: datetime) -> None: - command_attachment = ( - f"COMMAND: '{cmd}'\n" - f"OUTPUT:\n {output}\n" - f"RC: {return_code}\n" - f"Start / End / Elapsed\t {start_time} / {end_time} / {end_time - start_time}" - ) - with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'): - reporter.attach(command_attachment, "Command execution") - - -def log_command_execution(cmd: str, output: Union[str, dict], params: Optional[dict] = None, **kwargs) -> None: - logger.info(f"{cmd}: {output}") - - if not params: - params = {} - - if params.get("Body") and len(params.get("Body")) > 1000: - params["Body"] = "" - - output_params = params - - try: - json_params = json.dumps(params, indent=4, sort_keys=True, default=str) - except TypeError as err: - logger.warning(f"Failed to serialize '{cmd}' request parameters:\n{params}\nException: {err}") - else: - output_params = json_params - - output = json.dumps(output, indent=4, sort_keys=True, default=str) - - command_execution = f"COMMAND: '{cmd}'\n" f"URL: {kwargs['endpoint']}\n" f"PARAMS:\n{output_params}\n" f"OUTPUT:\n{output}\n" - aws_command = _convert_request_to_aws_cli_command(cmd, params, **kwargs) - - reporter.attach(command_execution, "Command execution") - reporter.attach(aws_command, "AWS CLI Command") - - -def _convert_request_to_aws_cli_command(command: str, params: dict, **kwargs) -> str: - overriden_names = [_convert_json_name_to_aws_cli(name) for name in kwargs.keys()] - command = command.replace("_", 
"-") - options = [] - - for name, value in params.items(): - name = _convert_json_name_to_aws_cli(name) - - # To override parameters for AWS CLI - if name in overriden_names: - continue - - if option := _create_option(name, value): - options.append(option) - - for name, value in kwargs.items(): - name = _convert_json_name_to_aws_cli(name) - if option := _create_option(name, value): - options.append(option) - - options = " ".join(options) - api = "s3api" if "s3" in kwargs["endpoint"] else "iam" - return f"aws --no-verify-ssl --no-paginate {api} {command} {options}" - - -def _convert_json_name_to_aws_cli(name: str) -> str: - specific_names = {"CORSConfiguration": "cors-configuration"} - - if aws_cli_name := specific_names.get(name): - return aws_cli_name - return re.sub(r"([a-z])([A-Z])", r"\1 \2", name).lower().replace(" ", "-").replace("_", "-") - - -def _create_option(name: str, value: Any) -> str | None: - if isinstance(value, bool) and value: - return f"--{name}" - - if isinstance(value, dict): - value = json.dumps(value, indent=4, sort_keys=True, default=str) - return f"--{name} '{value}'" - - if value: - return f"--{name} {value}" - - return None - - -def parse_netmap_output(output: str) -> list[NodeNetmapInfo]: - """ - The code will parse each line and return each node as dataclass. - """ - netmap_nodes = output.split("Node ")[1:] - dataclasses_netmap = [] - result_netmap = {} - - regexes = { - "node_id": r"\d+: (?P\w+)", - "node_data_ips": r"(?P/ip4/.+?)$", - "node_status": r"(?PONLINE|OFFLINE)", - "cluster_name": r"ClusterName: (?P\w+)", - "continent": r"Continent: (?P\w+)", - "country": r"Country: (?P\w+)", - "country_code": r"CountryCode: (?P\w+)", - "external_address": r"ExternalAddr: (?P/ip[4].+?)$", - "location": r"Location: (?P\w+.*)", - "node": r"Node: (?P\d+\.\d+\.\d+\.\d+)", - "price": r"Price: (?P\d+)", - "sub_div": r"SubDiv: (?P.*)", - "sub_div_code": r"SubDivCode: (?P\w+)", - "un_locode": r"UN-LOCODE: (?P\w+.*)", - "role": r"role: (?P\w+)", - } - - for node in netmap_nodes: - for key, regex in regexes.items(): - search_result = re.search(regex, node, flags=re.MULTILINE) - if key == "node_data_ips": - result_netmap[key] = search_result[key].strip().split(" ") - continue - if key == "external_address": - result_netmap[key] = search_result[key].strip().split(",") - continue - if search_result == None: - result_netmap[key] = None - continue - result_netmap[key] = search_result[key].strip() - - dataclasses_netmap.append(NodeNetmapInfo(**result_netmap)) - - return dataclasses_netmap - - -def parse_cmd_table(output: str, delimiter="|") -> list[dict[str, str]]: - parsing_output = [] - reader = csv.reader(StringIO(output.strip()), delimiter=delimiter) - iter_reader = iter(reader) - header_row = next(iter_reader) - for row in iter_reader: - table = {} - for i in range(len(row)): - header = header_row[i].strip().lower().replace(" ", "_") - value = row[i].strip().lower() - if header: - table[header] = value - parsing_output.append(table) - return parsing_output diff --git a/src/frostfs_testlib/utils/converting_utils.py b/src/frostfs_testlib/utils/converting_utils.py deleted file mode 100644 index 273d9b4..0000000 --- a/src/frostfs_testlib/utils/converting_utils.py +++ /dev/null @@ -1,75 +0,0 @@ -import base64 -import binascii -import json -from typing import Tuple - -import base58 - - -def calc_unit(value: float, skip_units: int = 0) -> Tuple[float, str]: - units = ["B", "KiB", "MiB", "GiB", "TiB"] - - for unit in units[skip_units:]: - if value < 1024: - return value, unit - - 
value = value / 1024.0 - - return value, unit - - -def str_to_ascii_hex(input: str) -> str: - b = binascii.hexlify(input.encode()) - return str(b)[2:-1] - - -def ascii_hex_to_str(input: str) -> bytes: - return bytes.fromhex(input) - - -# Two functions below do parsing of Base64-encoded byte arrays which -# tests receive from Neo node RPC calls. - - -def process_b64_bytearray_reverse(data: str) -> bytes: - """ - This function decodes input data from base64, reverses the byte - array and returns its string representation. - """ - arr = bytearray(base64.standard_b64decode(data)) - arr.reverse() - return binascii.b2a_hex(arr) - - -def process_b64_bytearray(data: str) -> bytes: - """ - This function decodes input data from base64 and returns the - bytearray string representation. - """ - arr = bytearray(base64.standard_b64decode(data)) - return binascii.b2a_hex(arr) - - -def contract_hash_to_address(chash: str) -> str: - """ - This function accepts contract hash in BE, then translates in to LE, - prepends NEO wallet prefix and encodes to base58. It is equal to - `UInt160ToString` method in NEO implementations. - """ - be = bytearray(bytes.fromhex(chash)) - be.reverse() - return base58.b58encode_check(b"\x35" + bytes(be)).decode() - - -def get_contract_hash_from_manifest(manifest_path: str) -> str: - with open(manifest_path) as m: - data = json.load(m) - # cut off '0x' and return the hash - return data["abi"]["hash"][2:] - - -def get_wif_from_private_key(priv_key: bytes) -> str: - wif_version = b"\x80" - compressed_flag = b"\x01" - wif = base58.b58encode_check(wif_version + priv_key + compressed_flag) - return wif.decode("utf-8") diff --git a/src/frostfs_testlib/utils/datetime_utils.py b/src/frostfs_testlib/utils/datetime_utils.py deleted file mode 100644 index 830178f..0000000 --- a/src/frostfs_testlib/utils/datetime_utils.py +++ /dev/null @@ -1,30 +0,0 @@ -# There is place for date time utils functions - - -def parse_time(value: str) -> int: - """Converts time interval in text form into time interval as number of seconds. - - Args: - value: time interval as text. - - Returns: - Number of seconds in the parsed time interval. 
- """ - if value is None: - return 0 - - value = value.lower() - - for suffix in ["s", "sec"]: - if value.endswith(suffix): - return int(value[: -len(suffix)]) - - for suffix in ["m", "min"]: - if value.endswith(suffix): - return int(value[: -len(suffix)]) * 60 - - for suffix in ["h", "hr", "hour"]: - if value.endswith(suffix): - return int(value[: -len(suffix)]) * 60 * 60 - - raise ValueError(f"Unknown units in time value '{value}'") diff --git a/src/frostfs_testlib/utils/env_utils.py b/src/frostfs_testlib/utils/env_utils.py deleted file mode 100644 index 3fdebe1..0000000 --- a/src/frostfs_testlib/utils/env_utils.py +++ /dev/null @@ -1,29 +0,0 @@ -import logging -import re - -from frostfs_testlib import reporter - -logger = logging.getLogger("NeoLogger") - - -@reporter.step("Read environment.properties") -def read_env_properties(file_path: str) -> dict: - with open(file_path, "r") as file: - raw_content = file.read() - - env_properties = {} - for line in raw_content.split("\n"): - m = re.match("(.*?)=(.*)", line) - if not m: - logger.warning(f"Could not parse env property from {line}") - continue - key, value = m.group(1), m.group(2) - env_properties[key] = value - return env_properties - - -@reporter.step("Update data in environment.properties") -def save_env_properties(file_path: str, env_data: dict) -> None: - with open(file_path, "a+") as env_file: - for env, env_value in env_data.items(): - env_file.write(f"{env}={env_value}\n") diff --git a/src/frostfs_testlib/utils/failover_utils.py b/src/frostfs_testlib/utils/failover_utils.py deleted file mode 100644 index 5c4d52f..0000000 --- a/src/frostfs_testlib/utils/failover_utils.py +++ /dev/null @@ -1,198 +0,0 @@ -import logging -from dataclasses import dataclass -from time import sleep -from typing import Optional - -from frostfs_testlib import reporter -from frostfs_testlib.resources.common import SERVICE_MAX_STARTUP_TIME -from frostfs_testlib.shell import Shell -from frostfs_testlib.steps.cli.object import neo_go_dump_keys -from frostfs_testlib.steps.node_management import storage_node_healthcheck -from frostfs_testlib.steps.storage_policy import get_nodes_with_object -from frostfs_testlib.storage.cluster import Cluster, ClusterNode, NodeBase, StorageNode -from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain -from frostfs_testlib.storage.dataclasses.node_base import ServiceClass -from frostfs_testlib.testing.test_control import wait_for_success -from frostfs_testlib.utils.datetime_utils import parse_time - -logger = logging.getLogger("NeoLogger") - - -@reporter.step("Check and return status of given service") -def service_status(service: str, shell: Shell) -> str: - return shell.exec(f"sudo systemctl is-active {service}").stdout.rstrip() - - -@dataclass -class TopCommand: - """ - This class using `from_stdout` helps to parse result from `top command`, could return result only for one PID - pid: Process PID - output: stdout result from TOP command - """ - - pid: Optional[str] = None - user: Optional[str] = None - pr: Optional[str] = None - ni: Optional[str] = None - virt: Optional[str] = None - res: Optional[str] = None - shr: Optional[str] = None - status: Optional[str] = None - cpu_percent: Optional[str] = None - mem_percent: Optional[str] = None - time: Optional[str] = None - cmd: Optional[str] = None - STATUS_RUNNING = "R" - STATUS_SLEEP = "S" - STATUS_ZOMBIE = "Z" - STATUS_UNSLEEP = "D" - STATUS_TRACED = "T" - - @staticmethod - def from_stdout(output: str, requested_pid: int) -> "TopCommand": - list_var = 
[None for i in range(12)] - for line in output.split("\n"): - if str(requested_pid) in line: - list_var = line.split() - return TopCommand( - pid=list_var[0], - user=list_var[1], - pr=list_var[2], - ni=list_var[3], - virt=list_var[4], - res=list_var[5], - shr=list_var[6], - status=list_var[7], - cpu_percent=list_var[8], - mem_percent=list_var[9], - time=list_var[10], - cmd=list_var[11], - ) - - -@reporter.step("Run `top` command with specified PID") -def service_status_top(service: str, shell: Shell) -> TopCommand: - pid = service_pid(service, shell) - output = shell.exec(f"sudo top -b -n 1 -p {pid}").stdout - return TopCommand.from_stdout(output, pid) - - -@reporter.step("Restart service n times with sleep") -def multiple_restart( - service_type: type[NodeBase], - node: ClusterNode, - count: int = 5, - sleep_interval: int = 2, -): - service_systemctl_name = node.service(service_type).get_service_systemctl_name() - service_name = node.service(service_type).name - for _ in range(count): - node.host.restart_service(service_name) - logger.info(f"Restart {service_systemctl_name}; sleep {sleep_interval} seconds and continue") - sleep(sleep_interval) - - -@wait_for_success(60, 5, title="Wait for services become {expected_status} on node {cluster_node}") -def check_services_status(cluster_node: ClusterNode, service_list: list[ServiceClass], expected_status: str): - cmd = "" - for service in service_list: - cmd += f' sudo systemctl status {service.get_service_systemctl_name()} --lines=0 | grep "Active:";' - result = cluster_node.host.get_shell().exec(cmd).stdout.rstrip() - statuses = list() - for line in result.split("\n"): - status_substring = line.split() - statuses.append(status_substring[1]) - unique_statuses = list(set(statuses)) - assert ( - len(unique_statuses) == 1 and expected_status in unique_statuses - ), f"Requested status={expected_status} not found in requested services={service_list}, list of statuses={result}" - - -@wait_for_success(60, 5, title="Wait for {service} become active") -def wait_service_in_desired_state(service: str, shell: Shell, expected_status: Optional[str] = "active"): - real_status = service_status(service=service, shell=shell) - assert ( - expected_status == real_status - ), f"Service {service}: expected status= {expected_status}, real status {real_status}" - - -@wait_for_success(parse_time(SERVICE_MAX_STARTUP_TIME), 1, title="Wait for {service_type} passes healtcheck on {node}") -def service_type_healthcheck( - service_type: type[NodeBase], - node: ClusterNode, -): - service = node.service(service_type) - assert ( - service.service_healthcheck() - ), f"Healthcheck failed for {service.get_service_systemctl_name()}, IP={node.host_ip}" - - -@reporter.step("Kill by process name") -def kill_by_service_name(service_type: type[NodeBase], node: ClusterNode): - service_systemctl_name = node.service(service_type).get_service_systemctl_name() - pid = service_pid(service_systemctl_name, node.host.get_shell()) - node.host.get_shell().exec(f"sudo kill -9 {pid}") - - -@reporter.step("Suspend {service}") -def suspend_service(shell: Shell, service: str): - shell.exec(f"sudo kill -STOP {service_pid(service, shell)}") - - -@reporter.step("Resume {service}") -def resume_service(shell: Shell, service: str): - shell.exec(f"sudo kill -CONT {service_pid(service, shell)}") - - -# retry mechanism cause when the task has been started recently '0' PID could be returned -@wait_for_success(10, 1, title="Get {service} pid") -def service_pid(service: str, shell: Shell) -> int: - output = 
shell.exec(f"systemctl show --property MainPID {service}").stdout.rstrip() - splitted = output.split("=") - PID = int(splitted[1]) - assert PID > 0, f"Service {service} has invalid PID={PID}" - return PID - - -@reporter.step("Wrapper for neo-go dump keys command") -def dump_keys(shell: Shell, node: ClusterNode) -> dict: - host = node.host - service_config = host.get_service_config(node.service(MorphChain).name) - wallet = service_config.attributes["wallet_path"] - return neo_go_dump_keys(shell=shell, wallet=wallet) - - -@reporter.step("Wait for object replication") -def wait_object_replication( - cid: str, - oid: str, - expected_copies: int, - shell: Shell, - nodes: list[StorageNode], - sleep_interval: int = 15, - attempts: int = 20, -) -> list[StorageNode]: - nodes_with_object = [] - for _ in range(attempts): - nodes_with_object = get_nodes_with_object(cid, oid, shell=shell, nodes=nodes) - if len(nodes_with_object) >= expected_copies: - return nodes_with_object - sleep(sleep_interval) - raise AssertionError( - f"Expected {expected_copies} copies of object, but found {len(nodes_with_object)}. " - f"Waiting time {sleep_interval * attempts}" - ) - - -def is_all_storage_nodes_returned(cluster: Cluster) -> bool: - with reporter.step("Run health check for all storage nodes"): - for node in cluster.services(StorageNode): - try: - health_check = storage_node_healthcheck(node) - except Exception as err: - logger.warning(f"Node healthcheck fails with error {err}") - return False - if health_check.health_status != "READY" or health_check.network_status != "ONLINE": - return False - return True diff --git a/src/frostfs_testlib/utils/file_keeper.py b/src/frostfs_testlib/utils/file_keeper.py deleted file mode 100644 index a5670cc..0000000 --- a/src/frostfs_testlib/utils/file_keeper.py +++ /dev/null @@ -1,48 +0,0 @@ -from concurrent.futures import ThreadPoolExecutor - -from frostfs_testlib import reporter -from frostfs_testlib.storage.dataclasses.node_base import NodeBase - - -class FileKeeper: - """This class is responsible to make backup copy of modified file and restore when required (mostly after the test)""" - - files_to_restore: dict[NodeBase, list[str]] = {} - - @reporter.step("Adding {file_to_restore} from node {node} to restore list") - def add(self, node: NodeBase, file_to_restore: str): - if node in self.files_to_restore and file_to_restore in self.files_to_restore[node]: - # Already added - return - - if node not in self.files_to_restore: - self.files_to_restore[node] = [] - - if file_to_restore not in self.files_to_restore[node]: - self.files_to_restore[node].append(file_to_restore) - - shell = node.host.get_shell() - shell.exec(f"cp {file_to_restore} {file_to_restore}.bak") - - @reporter.step("Restore files") - def restore_files(self): - nodes = self.files_to_restore.keys() - if not nodes: - return - - with ThreadPoolExecutor(max_workers=len(nodes)) as executor: - results = executor.map(self._restore_files_on_node, nodes) - - self.files_to_restore.clear() - - for _ in results: - # Iterate through results for exception check if any - pass - - @reporter.step("Restore files on node {node}") - def _restore_files_on_node(self, node: NodeBase): - shell = node.host.get_shell() - for file_to_restore in self.files_to_restore[node]: - with reporter.step(f"Restore file {file_to_restore} on node {node}"): - shell.exec(f"cp {file_to_restore}.bak {file_to_restore}") - shell.exec(f"rm {file_to_restore}.bak") diff --git a/src/frostfs_testlib/utils/file_utils.py b/src/frostfs_testlib/utils/file_utils.py 
deleted file mode 100644 index 8839d7f..0000000 --- a/src/frostfs_testlib/utils/file_utils.py +++ /dev/null @@ -1,218 +0,0 @@ -import hashlib -import logging -import os -import uuid -from typing import Any, Optional - -from frostfs_testlib import reporter -from frostfs_testlib.resources.common import ASSETS_DIR -from frostfs_testlib.utils import string_utils - -logger = logging.getLogger("NeoLogger") - - -class TestFile(os.PathLike): - def __init__(self, path: str): - self.path = path - - def __del__(self): - logger.debug(f"Removing file {self.path}") - if os.path.exists(self.path): - os.remove(self.path) - - def __str__(self): - return self.path - - def __repr__(self): - return self.path - - def __fspath__(self): - return self.path - - -def ensure_directory(path): - directory = os.path.dirname(path) - - if not os.path.exists(directory): - os.makedirs(directory) - - -def ensure_directory_opener(path, flags): - ensure_directory(path) - return os.open(path, flags) - - -# TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps -# Use object_size dt in future as argument -@reporter.step("Generate file") -def generate_file(size: int, file_name: Optional[str] = None) -> TestFile: - """Generates a binary file with the specified size in bytes. - - Args: - size: Size in bytes, can be declared as 6e+6 for example. - - Returns: - The path to the generated file. - """ - - if file_name is None: - file_name = string_utils.unique_name("object-") - - test_file = TestFile(os.path.join(ASSETS_DIR, file_name)) - with open(test_file, "wb", opener=ensure_directory_opener) as file: - file.write(os.urandom(size)) - logger.info(f"File with size {size} bytes has been generated: {test_file}") - - return test_file - - -# TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps -# Use object_size dt in future as argument -@reporter.step("Generate file with content") -def generate_file_with_content( - size: int, - file_path: Optional[str | TestFile] = None, - content: Optional[str] = None, -) -> TestFile: - """Creates a new file with specified content. - - Args: - file_path: Path to the file that should be created. If not specified, then random file - path will be generated. - content: Content that should be stored in the file. If not specified, then random binary - content will be generated. - - Returns: - Path to the generated file. - """ - mode = "w+" - if content is None: - content = os.urandom(size) - mode = "wb" - - test_file = None - if not file_path: - test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))) - elif isinstance(file_path, TestFile): - test_file = file_path - else: - test_file = TestFile(file_path) - - with open(test_file, mode, opener=ensure_directory_opener) as file: - file.write(content) - - return test_file - - -@reporter.step("Get File Hash") -def get_file_hash(file_path: str | TestFile, len: Optional[int] = None, offset: Optional[int] = None) -> str: - """Generates hash for the specified file. - - Args: - file_path: Path to the file to generate hash for. - len: How many bytes to read. - offset: Position to start reading from. - - Returns: - Hash of the file as hex-encoded string. 
- """ - file_hash = hashlib.sha256() - with open(file_path, "rb") as out: - if len and not offset: - file_hash.update(out.read(len)) - elif len and offset: - out.seek(offset, 0) - file_hash.update(out.read(len)) - elif offset and not len: - out.seek(offset, 0) - file_hash.update(out.read()) - else: - file_hash.update(out.read()) - return file_hash.hexdigest() - - -@reporter.step("Concatenation set of files to one file") -def concat_files(file_paths: list[str | TestFile], resulting_file_path: Optional[str | TestFile] = None) -> TestFile: - """Concatenates several files into a single file. - - Args: - file_paths: Paths to the files to concatenate. - resulting_file_path: Path to the file where concatenated content should be stored. - - Returns: - Path to the resulting file. - """ - - test_file = None - if not resulting_file_path: - test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))) - elif isinstance(resulting_file_path, TestFile): - test_file = resulting_file_path - else: - test_file = TestFile(resulting_file_path) - - with open(test_file, "wb", opener=ensure_directory_opener) as f: - for file in file_paths: - with open(file, "rb") as part_file: - f.write(part_file.read()) - return test_file - - -@reporter.step("Split file to {parts} parts") -def split_file(file_path: str | TestFile, parts: int) -> list[TestFile]: - """Splits specified file into several specified number of parts. - - Each part is saved under name `{original_file}_part_{i}`. - - Args: - file_path: Path to the file that should be split. - parts: Number of parts the file should be split into. - - Returns: - Paths to the part files. - """ - with open(file_path, "rb") as file: - content = file.read() - - content_size = len(content) - chunk_size = int((content_size + parts) / parts) - - part_id = 1 - part_file_paths = [] - for content_offset in range(0, content_size + 1, chunk_size): - part_file_name = f"{file_path}_part_{part_id}" - part_file_paths.append(TestFile(part_file_name)) - with open(part_file_name, "wb") as out_file: - out_file.write(content[content_offset : content_offset + chunk_size]) - part_id += 1 - - return part_file_paths - - -@reporter.step("Get file content") -def get_file_content(file_path: str | TestFile, content_len: Optional[int] = None, mode: str = "r", offset: Optional[int] = None) -> Any: - """Returns content of specified file. - - Args: - file_path: Path to the file. - content_len: Limit of content length. If None, then entire file content is returned; - otherwise only the first content_len bytes of the content are returned. - mode: Mode of opening the file. - offset: Position to start reading from. - - Returns: - Content of the specified file. 
- """ - with open(file_path, mode) as file: - if content_len and not offset: - content = file.read(content_len) - elif content_len and offset: - file.seek(offset, 0) - content = file.read(content_len) - elif offset and not content_len: - file.seek(offset, 0) - content = file.read() - else: - content = file.read() - - return content diff --git a/src/frostfs_testlib/utils/func_utils.py b/src/frostfs_testlib/utils/func_utils.py deleted file mode 100644 index 0e22d4a..0000000 --- a/src/frostfs_testlib/utils/func_utils.py +++ /dev/null @@ -1,58 +0,0 @@ -import collections -import inspect -import sys -from typing import Callable - - -def format_by_args(__func: Callable, __title: str, *a, **kw) -> str: - params = _func_parameters(__func, *a, **kw) - args = list(map(lambda x: _represent(x), a)) - - return __title.format(*args, **params) - - -# These 2 functions are copied from allure_commons._allure -# Duplicate it here in order to be independent of allure and make some adjustments. -def _represent(item): - if isinstance(item, str): - return item - elif isinstance(item, (bytes, bytearray)): - return repr(type(item)) - else: - return repr(item) - - -def _func_parameters(func, *args, **kwargs): - parameters = {} - arg_spec = inspect.getfullargspec(func) - arg_order = list(arg_spec.args) - args_dict = dict(zip(arg_spec.args, args)) - - if arg_spec.defaults: - kwargs_defaults_dict = dict(zip(arg_spec.args[-len(arg_spec.defaults) :], arg_spec.defaults)) - parameters.update(kwargs_defaults_dict) - - if arg_spec.varargs: - arg_order.append(arg_spec.varargs) - varargs = args[len(arg_spec.args) :] - parameters.update({arg_spec.varargs: varargs} if varargs else {}) - - if arg_spec.args and arg_spec.args[0] in ["cls", "self"]: - args_dict.pop(arg_spec.args[0], None) - - if kwargs: - if sys.version_info < (3, 7): - # Sort alphabetically as old python versions does - # not preserve call order for kwargs. - arg_order.extend(sorted(list(kwargs.keys()))) - else: - # Keep py3.7 behaviour to preserve kwargs order - arg_order.extend(list(kwargs.keys())) - parameters.update(kwargs) - - parameters.update(args_dict) - - items = parameters.items() - sorted_items = sorted(map(lambda kv: (kv[0], _represent(kv[1])), items), key=lambda x: arg_order.index(x[0])) - - return collections.OrderedDict(sorted_items) diff --git a/src/frostfs_testlib/utils/json_utils.py b/src/frostfs_testlib/utils/json_utils.py deleted file mode 100644 index 5db989e..0000000 --- a/src/frostfs_testlib/utils/json_utils.py +++ /dev/null @@ -1,136 +0,0 @@ -""" - When doing requests to FrostFS, we get JSON output as an automatically decoded - structure from protobuf. Some fields are decoded with boilerplates and binary - values are Base64-encoded. - - This module contains functions which rearrange the structure and reencode binary - data from Base64 to Base58. -""" - -import base64 - -import base58 - - -def decode_simple_header(data: dict) -> dict: - """ - This function reencodes Simple Object header and its attributes. - """ - try: - data = decode_common_fields(data) - - # Normalize object attributes - data["header"]["attributes"] = { - attr["key"]: attr["value"] for attr in data["header"]["attributes"] - } - except Exception as exc: - raise ValueError(f"failed to decode JSON output: {exc}") from exc - - return data - - -def decode_split_header(data: dict) -> dict: - """ - This function rearranges Complex Object header. 
- The header holds SplitID, a random unique - number, which is common among all splitted objects, and IDs of the Linking - Object and the last splitted Object. - """ - try: - data["splitId"] = json_reencode(data["splitId"]) - data["lastPart"] = json_reencode(data["lastPart"]["value"]) if data["lastPart"] else None - data["link"] = json_reencode(data["link"]["value"]) if data["link"] else None - except Exception as exc: - raise ValueError(f"failed to decode JSON output: {exc}") from exc - - return data - - -def decode_linking_object(data: dict) -> dict: - """ - This function reencodes Linking Object header. - It contains IDs of child Objects and Split Chain data. - """ - try: - data = decode_simple_header(data) - split = data["header"]["split"] - split["children"] = [json_reencode(item["value"]) for item in split["children"]] - split["splitID"] = json_reencode(split["splitID"]) - split["previous"] = json_reencode(split["previous"]["value"]) if split["previous"] else None - split["parent"] = json_reencode(split["parent"]["value"]) if split["parent"] else None - except Exception as exc: - raise ValueError(f"failed to decode JSON output: {exc}") from exc - - return data - - -def decode_storage_group(data: dict) -> dict: - """ - This function reencodes Storage Group header. - """ - try: - data = decode_common_fields(data) - except Exception as exc: - raise ValueError(f"failed to decode JSON output: {exc}") from exc - - return data - - -def decode_tombstone(data: dict) -> dict: - """ - This function re-encodes Tombstone header. - """ - try: - data = decode_simple_header(data) - data["header"]["sessionToken"] = decode_session_token(data["header"]["sessionToken"]) - except Exception as exc: - raise ValueError(f"failed to decode JSON output: {exc}") from exc - return data - - -def decode_session_token(data: dict) -> dict: - """ - This function re-encodes a fragment of header which contains - information about session token. - """ - target = data["body"]["object"]["target"] - target["container"] = json_reencode(target["container"]["value"]) - target["objects"] = [json_reencode(obj["value"]) for obj in target["objects"]] - return data - - -def json_reencode(data: str) -> str: - """ - According to JSON protocol, binary data (Object/Container/Storage Group IDs, etc) - is converted to string via Base58 encoder. But we usually operate with Base64-encoded format. - This function reencodes given Base58 string into the Base64 one. - """ - return base58.b58encode(base64.b64decode(data)).decode("utf-8") - - -def encode_for_json(data: str) -> str: - """ - This function encodes binary data for sending them as protobuf - structures. - """ - return base64.b64encode(base58.b58decode(data)).decode("utf-8") - - -def decode_common_fields(data: dict) -> dict: - """ - Despite of type (simple/complex Object, Storage Group, etc) every Object - header contains several common fields. - This function rearranges these fields. 
- """ - data["objectID"] = json_reencode(data["objectID"]["value"]) - - header = data["header"] - header["containerID"] = json_reencode(header["containerID"]["value"]) - header["ownerID"] = json_reencode(header["ownerID"]["value"]) - header["payloadHash"] = json_reencode(header["payloadHash"]["sum"]) - header["version"] = f"{header['version']['major']}{header['version']['minor']}" - # Homomorphic hash is optional and its calculation might be disabled in trusted network - if header.get("homomorphicHash"): - header["homomorphicHash"] = json_reencode(header["homomorphicHash"]["sum"]) - - return data diff --git a/src/frostfs_testlib/utils/string_utils.py b/src/frostfs_testlib/utils/string_utils.py deleted file mode 100644 index acbca92..0000000 --- a/src/frostfs_testlib/utils/string_utils.py +++ /dev/null @@ -1,50 +0,0 @@ -import itertools -import random -import re -import string -from datetime import datetime - -ONLY_ASCII_LETTERS = string.ascii_letters -DIGITS_AND_ASCII_LETTERS = string.ascii_letters + string.digits -NON_DIGITS_AND_LETTERS = string.punctuation - -# if unique_name is called multiple times within the same microsecond, append 0-4 to the name so it surely unique -FUSE = itertools.cycle(range(5)) - - -def unique_name(prefix: str = "", postfix: str = ""): - """ - Generate unique short name of anything with prefix. - This should be unique in scope of multiple runs - - Args: - prefix: prefix for unique name generation - Returns: - unique name string - """ - return f"{prefix}{hex(int(datetime.now().timestamp() * 1000000))}{next(FUSE)}{postfix}" - - -def random_string(length: int = 5, source: str = ONLY_ASCII_LETTERS): - """ - Generate random string from source letters list - - Args: - length: length for generated string - source: source string with letters for generate random string - Returns: - (str): random string with len == length - """ - - return "".join(random.choice(source) for i in range(length)) - - -def is_str_match_pattern(error: Exception, status_pattern: str) -> bool: - """ - Determines whether exception matches specified status pattern. - - We use re.search() to be consistent with pytest.raises. 
- """ - match = re.search(status_pattern, str(error)) - - return match is not None diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py deleted file mode 100644 index 0676085..0000000 --- a/src/frostfs_testlib/utils/version_utils.py +++ /dev/null @@ -1,90 +0,0 @@ -import logging -import re -from functools import lru_cache - -from frostfs_testlib import reporter -from frostfs_testlib.cli import FrostfsAdm, FrostfsCli -from frostfs_testlib.hosting import Host, Hosting -from frostfs_testlib.resources.cli import FROSTFS_ADM_EXEC, FROSTFS_AUTHMATE_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE -from frostfs_testlib.shell import Shell -from frostfs_testlib.testing.parallel import parallel - -logger = logging.getLogger("NeoLogger") - - -@reporter.step("Get local binaries versions") -def get_local_binaries_versions(shell: Shell) -> dict[str, str]: - versions = {} - - for binary in [NEOGO_EXECUTABLE, FROSTFS_AUTHMATE_EXEC]: - out = shell.exec(f"{binary} --version").stdout - versions[binary] = parse_version(out) - - frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC) - versions[FROSTFS_CLI_EXEC] = parse_version(frostfs_cli.version.get().stdout) - - try: - frostfs_adm = FrostfsAdm(shell, FROSTFS_ADM_EXEC) - versions[FROSTFS_ADM_EXEC] = parse_version(frostfs_adm.version.get().stdout) - except RuntimeError: - logger.info(f"{FROSTFS_ADM_EXEC} not installed") - - out = shell.exec("aws --version").stdout - out_lines = out.split("\n") - versions["AWS"] = out_lines[0] if out_lines else "Unknown" - logger.info(f"Local binaries version: {out_lines[0]}") - - return versions - - -@reporter.step("Collect binaries versions from host") -def parallel_binary_verions(host: Host) -> dict[str, str]: - versions_by_host = {} - - binary_path_by_name = { - **{ - svc.name[:-3]: { - "exec_path": svc.attributes.get("exec_path"), - "param": svc.attributes.get("custom_version_parameter", "--version"), - } - for svc in host.config.services - if svc.attributes.get("exec_path") and svc.attributes.get("requires_version_check", "true") == "true" - }, - **{ - cli.name: {"exec_path": cli.exec_path, "param": cli.attributes.get("custom_version_parameter", "--version")} - for cli in host.config.clis - if cli.attributes.get("requires_version_check", "true") == "true" - }, - } - - shell = host.get_shell() - versions_at_host = {} - for binary_name, binary in binary_path_by_name.items(): - binary_path = binary["exec_path"] - try: - result = shell.exec(f"{binary_path} {binary['param']}") - version = parse_version(result.stdout) or parse_version(result.stderr) or "Unknown" - versions_at_host[binary_name] = version.strip() - except Exception as exc: - logger.error(f"Cannot get version for {binary_path} because of\n{exc}") - versions_at_host[binary_name] = "Unknown" - versions_by_host[host.config.address] = versions_at_host - return versions_by_host - - -@lru_cache -def get_remote_binaries_versions(hosting: Hosting) -> dict[str, dict[str, str]]: - versions_by_host: dict[str, dict[str, str]] = {} - - with reporter.step("Get remote binaries versions"): - future_binary_verions = parallel(parallel_binary_verions, parallel_items=hosting.hosts) - - for future in future_binary_verions: - versions_by_host.update(future.result()) - - return versions_by_host - - -def parse_version(version_output: str) -> str: - version = re.search(r"(?<=version[:=])\s?[\"\']?v?(.+)", version_output, re.IGNORECASE) - return version.group(1).strip("\"'\n\t ") if version else version_output diff --git 
a/src/frostfs_testlib/utils/wallet_utils.py b/src/frostfs_testlib/utils/wallet_utils.py deleted file mode 100644 index d2b4229..0000000 --- a/src/frostfs_testlib/utils/wallet_utils.py +++ /dev/null @@ -1,73 +0,0 @@ -import base64 -import json -import logging - -import base58 -from neo3.wallet import account as neo3_account -from neo3.wallet import wallet as neo3_wallet - -logger = logging.getLogger("frostfs.testlib.utils") - - -def __fix_wallet_schema(wallet: dict) -> None: - # Temporary function to fix wallets that do not conform to the schema - # TODO: get rid of it once issue is solved - if "name" not in wallet: - wallet["name"] = None - for account in wallet["accounts"]: - if "extra" not in account: - account["extra"] = None - - -def init_wallet(wallet_path: str, wallet_password: str): - """ - Create new wallet and new account. - Args: - wallet_path: The path to the wallet to save wallet. - wallet_password: The password for new wallet. - """ - wallet = neo3_wallet.Wallet() - account = neo3_account.Account.create_new(wallet_password) - wallet.account_add(account) - with open(wallet_path, "w") as out: - json.dump(wallet.to_json(), out) - logger.info(f"Init new wallet: {wallet_path}, address: {account.address}") - - -def get_last_address_from_wallet(wallet_path: str, wallet_password: str): - """ - Extracting the last address from the given wallet. - Args: - wallet_path: The path to the wallet to extract address from. - wallet_password: The password for the given wallet. - Returns: - The address for the wallet. - """ - wallet = load_wallet(wallet_path, wallet_password) - address = wallet.accounts[-1].address - logger.info(f"got address: {address}") - return address - - -def get_wallet_public_key(wallet_path: str, wallet_password: str, format: str = "hex") -> str: - wallet = load_wallet(wallet_path, wallet_password) - public_key_hex = str(wallet.accounts[0].public_key) - - # Convert public key to specified format - if format == "hex": - return public_key_hex - if format == "base58": - public_key_base58 = base58.b58encode(bytes.fromhex(public_key_hex)) - return public_key_base58.decode("utf-8") - if format == "base64": - public_key_base64 = base64.b64encode(bytes.fromhex(public_key_hex)) - return public_key_base64.decode("utf-8") - raise ValueError(f"Invalid public key format: {format}") - - -def load_wallet(wallet_path: str, wallet_password: str) -> neo3_wallet.Wallet: - with open(wallet_path) as wallet_file: - wallet_content = json.load(wallet_file) - - __fix_wallet_schema(wallet_content) - return neo3_wallet.Wallet.from_json(wallet_content, password=wallet_password) diff --git a/tests/conftest.py b/tests/conftest.py deleted file mode 100644 index ea6d681..0000000 --- a/tests/conftest.py +++ /dev/null @@ -1,5 +0,0 @@ -import os -import sys - -app_dir = os.path.join(os.getcwd(), "src") -sys.path.insert(0, app_dir) diff --git a/tests/helpers.py b/tests/helpers.py deleted file mode 100644 index b7776fd..0000000 --- a/tests/helpers.py +++ /dev/null @@ -1,32 +0,0 @@ -import traceback - -from frostfs_testlib.shell.interfaces import CommandResult - - -def format_error_details(error: Exception) -> str: - """Converts specified exception instance into a string. - - The resulting string includes error message and the full stack trace. - - Args: - error: Exception to convert. - - Returns: - String containing exception details. 
- """ - detail_lines = traceback.format_exception(error) - return "".join(detail_lines) - - -def get_output_lines(result: CommandResult) -> list[str]: - """Converts output of specified command result into separate lines. - - Whitespaces are trimmed, empty lines are excluded. - - Args: - result: Command result which output should be converted. - - Returns: - List of lines extracted from the output. - """ - return [line.strip() for line in result.stdout.split("\n") if line.strip()] diff --git a/tests/test_cli.py b/tests/test_cli.py deleted file mode 100644 index 6f4d791..0000000 --- a/tests/test_cli.py +++ /dev/null @@ -1,174 +0,0 @@ -from unittest import TestCase -from unittest.mock import Mock - -from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput - - -class TestCli(TestCase): - frostfs_adm_exec_path = "neo-adm-exec" - frostfs_go_exec_path = "neo-go-exec" - frostfs_cli_exec_path = "neo-cli-exec" - - address = "0x0000000000000000000" - addresses = ["0x000000", "0xDEADBEEF", "0xBABECAFE"] - amount = 100 - file1 = "file_1" - file2 = "directory/file_2" - manifest = "manifest1" - token = "GAS" - rpc_endpoint = "endpoint-1" - sysgas: float = 0.001 - wallet = "wallet1" - wallet_password = "P@$$w0rd" - config_file = "config.yml" - basic_acl = "1FBFBFFF" - policy = "policy1" - timeout = 20 - xhdr = {"param1": "value1", "param2": "value2"} - - def test_container_create(self): - shell = Mock() - frostfs_cli = FrostfsCli( - config_file=self.config_file, - frostfs_cli_exec_path=self.frostfs_cli_exec_path, - shell=shell, - ) - frostfs_cli.container.create( - rpc_endpoint=self.rpc_endpoint, - wallet=self.wallet, - basic_acl=self.basic_acl, - policy=self.policy, - await_mode=True, - xhdr=self.xhdr, - ) - - xhdr = ",".join(f"{param}={value}" for param, value in self.xhdr.items()) - expected_command = ( - f"{self.frostfs_cli_exec_path} --config {self.config_file} container create " - f"--rpc-endpoint '{self.rpc_endpoint}' --wallet '{self.wallet}' " - f"--basic-acl '{self.basic_acl}' --await --policy '{self.policy}' " - f"--xhdr '{xhdr}'" - ) - - shell.exec.assert_called_once_with(expected_command) - - def test_bad_wallet_argument(self): - shell = Mock() - neo_go = NeoGo( - shell=shell, config_path=self.config_file, neo_go_exec_path=self.frostfs_go_exec_path - ) - with self.assertRaises(Exception) as exc_msg: - neo_go.contract.add_group( - address=self.address, - manifest=self.manifest, - wallet_password=self.wallet_password, - ) - self.assertEqual(CliCommand.WALLET_SOURCE_ERROR_MSG, str(exc_msg.exception)) - - with self.assertRaises(Exception) as exc_msg: - neo_go.contract.add_group( - wallet=self.wallet, - wallet_password=self.wallet_password, - wallet_config=self.config_file, - address=self.address, - manifest=self.manifest, - ) - self.assertEqual(CliCommand.WALLET_SOURCE_ERROR_MSG, str(exc_msg.exception)) - - with self.assertRaises(Exception) as exc_msg: - neo_go.contract.add_group( - wallet=self.wallet, - address=self.address, - manifest=self.manifest, - ) - self.assertEqual(CliCommand.WALLET_PASSWD_ERROR_MSG, str(exc_msg.exception)) - - def test_wallet_sign(self): - shell = Mock() - neo_go = NeoGo( - shell=shell, config_path=self.config_file, neo_go_exec_path=self.frostfs_go_exec_path - ) - neo_go.wallet.sign( - input_file=self.file1, - out=self.file2, - rpc_endpoint=self.rpc_endpoint, - address=self.address, - wallet=self.wallet, - 
wallet_password=self.wallet_password, - timeout=self.timeout, - ) - - expected_command = ( - f"{self.frostfs_go_exec_path} --config_path {self.config_file} wallet sign " - f"--input-file '{self.file1}' --address '{self.address}' " - f"--rpc-endpoint '{self.rpc_endpoint}' --wallet '{self.wallet}' " - f"--out '{self.file2}' --timeout '{self.timeout}s'" - ) - - shell.exec.assert_called_once_with( - expected_command, - options=CommandOptions( - interactive_inputs=[ - InteractiveInput(prompt_pattern="assword", input=self.wallet_password) - ] - ), - ) - - def test_subnet_create(self): - shell = Mock() - frostfs_adm = FrostfsAdm( - config_file=self.config_file, - frostfs_adm_exec_path=self.frostfs_adm_exec_path, - shell=shell, - ) - frostfs_adm.subnet.create( - address=self.address, - rpc_endpoint=self.rpc_endpoint, - wallet=self.wallet, - notary=True, - ) - - expected_command = ( - f"{self.frostfs_adm_exec_path} --config {self.config_file} morph subnet create " - f"--rpc-endpoint '{self.rpc_endpoint}' --address '{self.address}' " - f"--wallet '{self.wallet}' --notary" - ) - - shell.exec.assert_called_once_with(expected_command) - - def test_wallet_nep17_multitransfer(self): - shell = Mock() - neo_go = NeoGo( - shell=shell, config_path=self.config_file, neo_go_exec_path=self.frostfs_go_exec_path - ) - neo_go.nep17.multitransfer( - wallet=self.wallet, - token=self.token, - to_address=self.addresses, - sysgas=self.sysgas, - rpc_endpoint=self.rpc_endpoint, - amount=self.amount, - force=True, - from_address=self.address, - timeout=self.timeout, - ) - - to_address = "".join(f" --to '{address}'" for address in self.addresses) - expected_command = ( - f"{self.frostfs_go_exec_path} --config_path {self.config_file} " - f"wallet nep17 multitransfer --token '{self.token}'" - f"{to_address} --sysgas '{self.sysgas}' --rpc-endpoint '{self.rpc_endpoint}' " - f"--wallet '{self.wallet}' --from '{self.address}' --force --amount {self.amount} " - f"--timeout '{self.timeout}s'" - ) - - shell.exec.assert_called_once_with(expected_command) - - def test_version(self): - shell = Mock() - frostfs_adm = FrostfsAdm(shell=shell, frostfs_adm_exec_path=self.frostfs_adm_exec_path) - frostfs_adm.version.get() - - shell.exec.assert_called_once_with(f"{self.frostfs_adm_exec_path} --version") diff --git a/tests/test_converters.py b/tests/test_converters.py deleted file mode 100644 index 8ff923d..0000000 --- a/tests/test_converters.py +++ /dev/null @@ -1,46 +0,0 @@ -from unittest import TestCase - -from frostfs_testlib.utils import converting_utils - - -class TestConverters(TestCase): - def test_str_to_ascii_hex(self): - source_str = "" - result_str = "" - self.assertEqual(converting_utils.str_to_ascii_hex(source_str), result_str) - - source_str = '"test_data" f0r ^convert*' - result_str = "22746573745f646174612220663072205e636f6e766572742a" - self.assertEqual(converting_utils.str_to_ascii_hex(source_str), result_str) - - def test_ascii_hex_to_str(self): - source_str = "" - result_bytes = b"" - self.assertEqual(converting_utils.ascii_hex_to_str(source_str), result_bytes) - - source_str = "22746573745f646174612220663072205e636f6e766572742a" - result_bytes = b'"test_data" f0r ^convert*' - self.assertEqual(converting_utils.ascii_hex_to_str(source_str), result_bytes) - - def test_process_b64_bytearray_reverse(self): - source_str = "" - result_bytes = b"" - self.assertEqual(converting_utils.process_b64_bytearray_reverse(source_str), result_bytes) - - source_str = "InRlc3RfZGF0YSIgZjByIF5jb252ZXJ0Kg==" - result_bytes = 
b"2a747265766e6f635e207230662022617461645f7473657422" - self.assertEqual(converting_utils.process_b64_bytearray_reverse(source_str), result_bytes) - - def test_process_b64_bytearray(self): - source_str = "" - result_bytes = b"" - self.assertEqual(converting_utils.process_b64_bytearray(source_str), result_bytes) - - source_str = "InRlc3RfZGF0YSIgZjByIF5jb252ZXJ0Kg==" - result_bytes = b"22746573745f646174612220663072205e636f6e766572742a" - self.assertEqual(converting_utils.process_b64_bytearray(source_str), result_bytes) - - def test_contract_hash_to_address(self): - source_str = "d01a381aae45f1ed181db9d554cc5ccc69c69f4e" - result_str = "NT5hJ5peVmvYdZCsFKUM5MTcEGw5TB4k89" - self.assertEqual(converting_utils.contract_hash_to_address(source_str), result_str) diff --git a/tests/test_dataclasses.py b/tests/test_dataclasses.py deleted file mode 100644 index 677aed4..0000000 --- a/tests/test_dataclasses.py +++ /dev/null @@ -1,33 +0,0 @@ -from typing import Any - -import pytest - -from frostfs_testlib.clients import AwsCliClient, Boto3ClientWrapper -from frostfs_testlib.storage.dataclasses.acl import EACLRole -from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode -from frostfs_testlib.storage.dataclasses.object_size import ObjectSize - - -class TestDataclassesStr: - """Here we are testing important classes string representation.""" - - @pytest.mark.parametrize( - "obj, expected", - [ - (Boto3ClientWrapper, "Boto3 client"), - (AwsCliClient, "AWS CLI"), - (ObjectSize("simple", 1), "simple"), - (ObjectSize("simple", 10), "simple"), - (ObjectSize("complex", 5000), "complex"), - (ObjectSize("complex", 5555), "complex"), - (StorageNode, "StorageNode"), - (MorphChain, "MorphChain"), - (S3Gate, "S3Gate"), - (HTTPGate, "HTTPGate"), - (InnerRing, "InnerRing"), - (EACLRole.OTHERS, "OTHERS"), - ], - ) - def test_classes_string_representation(self, obj: Any, expected: str): - assert f"{obj}" == expected - assert repr(obj) == expected diff --git a/tests/test_hosting.py b/tests/test_hosting.py deleted file mode 100644 index 39580cb..0000000 --- a/tests/test_hosting.py +++ /dev/null @@ -1,109 +0,0 @@ -from unittest import TestCase - -from frostfs_testlib.hosting import CLIConfig, Hosting, ServiceConfig - - -class TestHosting(TestCase): - SERVICE_NAME_PREFIX = "service" - HOST1_ADDRESS = "10.10.10.10" - HOST1_PLUGIN = "docker" - HOST1_ATTRIBUTES = {"param1": "value1"} - SERVICE1_ATTRIBUTES = {"rpc_endpoint": "service1_endpoint"} - HOST1_CLIS = [{"name": "cli1", "exec_path": "cli1.exe", "attributes": {"param1": "value1"}}] - SERVICE1 = {"name": f"{SERVICE_NAME_PREFIX}1", "attributes": SERVICE1_ATTRIBUTES} - HOST1_SERVICES = [SERVICE1] - HOST1 = { - "address": HOST1_ADDRESS, - "plugin_name": HOST1_PLUGIN, - "healthcheck_plugin_name": "basic", - "attributes": HOST1_ATTRIBUTES, - "clis": HOST1_CLIS, - "services": HOST1_SERVICES, - } - - HOST2_ADDRESS = "localhost" - HOST2_PLUGIN = "docker" - HOST2_ATTRIBUTES = {"param2": "value2"} - SERVICE2_ATTRIBUTES = {"rpc_endpoint": "service2_endpoint"} - SERVICE3_ATTRIBUTES = {"rpc_endpoint": "service3_endpoint"} - HOST2_CLIS = [{"name": "cli2", "exec_path": "/bin/cli", "attributes": {}}] - SERVICE2 = {"name": f"{SERVICE_NAME_PREFIX}", "attributes": SERVICE2_ATTRIBUTES} - SERVICE3 = {"name": f"text_before_{SERVICE_NAME_PREFIX}3", "attributes": SERVICE3_ATTRIBUTES} - HOST2_SERVICES = [SERVICE2, SERVICE3] - HOST2 = { - "address": HOST2_ADDRESS, - "plugin_name": HOST2_PLUGIN, - "healthcheck_plugin_name": "basic", - 
"attributes": HOST2_ATTRIBUTES, - "clis": HOST2_CLIS, - "services": HOST2_SERVICES, - } - HOSTING_CONFIG = {"hosts": [HOST1, HOST2]} - - def test_hosting_configure(self): - hosting = Hosting() - hosting.configure(self.HOSTING_CONFIG) - self.assertEqual(len(hosting.hosts), 2) - - def test_get_host_by_address(self): - hosting = Hosting() - hosting.configure(self.HOSTING_CONFIG) - - host1 = hosting.get_host_by_address(self.HOST1_ADDRESS) - self.assertEqual(host1.config.address, self.HOST1_ADDRESS) - self.assertEqual(host1.config.plugin_name, self.HOST1_PLUGIN) - self.assertDictEqual(host1.config.attributes, self.HOST1_ATTRIBUTES) - self.assertListEqual(host1.config.clis, [CLIConfig(**cli) for cli in self.HOST1_CLIS]) - self.assertListEqual(host1.config.services, [ServiceConfig(**service) for service in self.HOST1_SERVICES]) - - host2 = hosting.get_host_by_address(self.HOST2_ADDRESS) - self.assertEqual(host2.config.address, self.HOST2_ADDRESS) - self.assertEqual(host2.config.plugin_name, self.HOST2_PLUGIN) - self.assertDictEqual(host2.config.attributes, self.HOST2_ATTRIBUTES) - self.assertListEqual(host2.config.clis, [CLIConfig(**cli) for cli in self.HOST2_CLIS]) - self.assertListEqual(host2.config.services, [ServiceConfig(**service) for service in self.HOST2_SERVICES]) - - def test_get_host_by_service(self): - hosting = Hosting() - hosting.configure(self.HOSTING_CONFIG) - - host_with_service1 = hosting.get_host_by_service(self.SERVICE1["name"]) - host_with_service2 = hosting.get_host_by_service(self.SERVICE2["name"]) - host_with_service3 = hosting.get_host_by_service(self.SERVICE3["name"]) - - self.assertEqual(host_with_service1.config.address, self.HOST1_ADDRESS) - self.assertEqual(host_with_service2.config.address, self.HOST2_ADDRESS) - self.assertEqual(host_with_service3.config.address, self.HOST2_ADDRESS) - - def test_get_service_config(self): - hosting = Hosting() - hosting.configure(self.HOSTING_CONFIG) - - service1_config = hosting.get_service_config(self.SERVICE1["name"]) - service2_config = hosting.get_service_config(self.SERVICE2["name"]) - service3_config = hosting.get_service_config(self.SERVICE3["name"]) - - self.assertEqual(service1_config.name, self.SERVICE1["name"]) - self.assertDictEqual(service1_config.attributes, self.SERVICE1_ATTRIBUTES) - - self.assertEqual(service2_config.name, self.SERVICE2["name"]) - self.assertDictEqual(service2_config.attributes, self.SERVICE2_ATTRIBUTES) - - self.assertEqual(service3_config.name, self.SERVICE3["name"]) - self.assertDictEqual(service3_config.attributes, self.SERVICE3_ATTRIBUTES) - - def test_find_service_configs(self): - hosting = Hosting() - hosting.configure(self.HOSTING_CONFIG) - - all_services = hosting.find_service_configs(r".+") - self.assertEqual(len(all_services), 3) - - services = hosting.find_service_configs(rf"^{self.SERVICE_NAME_PREFIX}") - self.assertEqual(len(services), 2) - for service in services: - self.assertEqual(service.name[: len(self.SERVICE_NAME_PREFIX)], self.SERVICE_NAME_PREFIX) - - service1 = hosting.find_service_configs(self.SERVICE1["name"]) - self.assertEqual(len(service1), 1) - self.assertDictEqual(service1[0].attributes, self.SERVICE1_ATTRIBUTES) diff --git a/tests/test_load_config.py b/tests/test_load_config.py deleted file mode 100644 index fbeb587..0000000 --- a/tests/test_load_config.py +++ /dev/null @@ -1,798 +0,0 @@ -from dataclasses import Field, dataclass, fields, is_dataclass -from typing import Any, get_args - -import pytest - -from frostfs_testlib.load.load_config import 
EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType, Preset, ReadFrom -from frostfs_testlib.load.runners import DefaultRunner -from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME -from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController - - -@dataclass -class MetaTestField: - field: Field - field_type: type - instance: Any - - -class TestLoadConfig: - @pytest.fixture - def set_empty(self, request: pytest.FixtureRequest): - # Workaround for verify - if "param" in request.__dict__ and request.param: - return request.param - - return False - - @pytest.fixture - def load_type(self, request: pytest.FixtureRequest): - # Workaround for verify - if "param" in request.__dict__ and request.param: - return request.param - - return None - - @pytest.fixture - def load_params(self, load_type: LoadType, set_empty: bool, request: pytest.FixtureRequest): - load_scenario = request.param - return self._get_filled_load_params(load_type, load_scenario, set_empty) - - def test_load_params_only_load_type_required(self): - load_params = LoadParams(load_type=LoadType.S3) - expected = "s3" - assert repr(load_params) == expected - assert f"{load_params}" == expected - - def test_load_params_init_time(self): - load_params = LoadParams(load_type=LoadType.S3) - vus = 100 - - load_params.vu_init_time = BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME - # Used in time calculations - load_params.readers = vus - load_params.writers = vus - load_params.preallocated_readers = vus - load_params.preallocated_writers = vus - - # Not used in time calculations - load_params.deleters = vus - load_params.preallocated_deleters = vus - - expected = vus * 4 * BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME - actual = load_params.get_init_time() - assert actual == expected, "Incorrect time for get_init_time()" - - def test_load_params_initially_have_all_values_none(self): - load_params = LoadParams(load_type=LoadType.S3) - self._check_all_values_none(load_params, ["load_type", "scenario"]) - - def test_preset_initially_have_all_values_none(self): - preset = Preset() - self._check_all_values_none(preset) - - @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True) - def test_string_representation_s3_car(self, load_params: LoadParams): - load_params.object_size = 524288 - expected = "s3_car 512 MiB, write_rate=10, read_rate=9, delete_rate=11, preallocated_writers=20, preallocated_readers=20, preallocated_deleters=21" - assert f"{load_params}" == expected - assert repr(load_params) == expected - - @pytest.mark.parametrize("load_params", [LoadScenario.gRPC], indirect=True) - def test_string_representation_grpc(self, load_params: LoadParams): - load_params.object_size = 512 - expected = "grpc 512 KiB, writers=7, readers=7, deleters=8" - assert f"{load_params}" == expected - assert repr(load_params) == expected - - @pytest.mark.parametrize("load_params", [LoadScenario.gRPC], indirect=True) - def test_load_controller_string_representation(self, load_params: LoadParams): - load_params.endpoint_selection_strategy = EndpointSelectionStrategy.ALL - load_params.object_size = 512 - background_load_controller = BackgroundLoadController("tmp", load_params, None, None, DefaultRunner(None)) - expected = "grpc 512 KiB, writers=7, readers=7, deleters=8" - assert f"{background_load_controller}" == expected - assert repr(background_load_controller) == expected - - def test_load_set_id_changes_fields(self): - load_params = LoadParams(load_type=LoadType.S3) - 
load_params.preset = Preset() - load_params.read_from = ReadFrom["REGISTRY"] - load_params.working_dir = "/tmp" - load_params.set_id("test_id") - - assert load_params.registry_file == "/tmp/test_id_registry.bolt" - assert load_params.preset.pregen_json == "/tmp/test_id_prepare.json" - assert load_params.load_id == "test_id" - - # No other values should be changed - self._check_all_values_none( - load_params, - [ - "load_type", - "working_dir", - "load_id", - "registry_file", - "preset", - "scenario", - "read_from", - ], - ) - self._check_all_values_none(load_params.preset, ["pregen_json", "scenario"]) - - @pytest.mark.parametrize("load_params", [LoadScenario.gRPC], indirect=True) - def test_argument_parsing_for_grpc_scenario(self, load_params: LoadParams): - expected_preset_args = [ - "--size '11'", - "--acl 'acl'", - "--preload_obj '13'", - "--retry '24'", - "--rule 'rule' --rule 'rule_2'", - "--out 'pregen_json'", - "--workers '7'", - "--containers '16'", - "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", - "--ignore-errors", - "--sleep '19'", - "--local", - ] - expected_env_vars = { - "DURATION": 9, - "WRITE_OBJ_SIZE": 11, - "REGISTRY_FILE": "registry_file", - "K6_MIN_ITERATION_DURATION": "min_iteration_duration", - "K6_SETUP_TIMEOUT": "setup_timeout", - "WRITERS": 7, - "READERS": 7, - "DELETERS": 8, - "READ_AGE": 8, - "STREAMING": 9, - "K6_OUT": "output", - "PREGEN_JSON": "pregen_json", - "PREPARE_LOCALLY": True, - } - - self._check_preset_params(load_params, expected_preset_args) - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize("load_params", [LoadScenario.gRPC_CAR], indirect=True) - def test_argument_parsing_for_grpc_car_scenario(self, load_params: LoadParams): - expected_preset_args = [ - "--size '11'", - "--preload_obj '13'", - "--retry '24'", - "--rule 'rule' --rule 'rule_2'", - "--out 'pregen_json'", - "--workers '7'", - "--containers '16'", - "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", - "--ignore-errors", - "--sleep '19'", - "--local", - "--acl 'acl'", - ] - expected_env_vars = { - "DURATION": 9, - "WRITE_OBJ_SIZE": 11, - "K6_OUT": "output", - "REGISTRY_FILE": "registry_file", - "K6_MIN_ITERATION_DURATION": "min_iteration_duration", - "K6_SETUP_TIMEOUT": "setup_timeout", - "MAX_WRITERS": 11, - "MAX_READERS": 11, - "MAX_DELETERS": 12, - "PRE_ALLOC_DELETERS": 21, - "PRE_ALLOC_READERS": 20, - "PRE_ALLOC_WRITERS": 20, - "PREGEN_JSON": "pregen_json", - "TIME_UNIT": "time_unit", - "WRITE_RATE": 10, - "READ_RATE": 9, - "READ_AGE": 8, - "DELETE_RATE": 11, - "STREAMING": 9, - "PREPARE_LOCALLY": True, - } - - self._check_preset_params(load_params, expected_preset_args) - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize("load_params", [LoadScenario.S3], indirect=True) - def test_argument_parsing_for_s3_scenario(self, load_params: LoadParams): - expected_preset_args = [ - "--size '11'", - "--preload_obj '13'", - "--no-verify-ssl", - "--out 'pregen_json'", - "--workers '7'", - "--buckets '13'", - "--location 's3_location' --location 's3_location_2'", - "--ignore-errors", - "--sleep '19'", - "--acl 'acl'", - ] - expected_env_vars = { - "DURATION": 9, - "WRITE_OBJ_SIZE": 11, - "REGISTRY_FILE": "registry_file", - "K6_OUT": "output", - "K6_MIN_ITERATION_DURATION": "min_iteration_duration", - "K6_SETUP_TIMEOUT": "setup_timeout", - "WRITERS": 7, - "READERS": 7, - "DELETERS": 8, - "READ_AGE": 8, - "STREAMING": 9, - "NO_VERIFY_SSL": True, - "PREGEN_JSON": 
"pregen_json", - } - - self._check_preset_params(load_params, expected_preset_args) - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True) - def test_argument_parsing_for_s3_car_scenario_with_stringed_time(self, load_params: LoadParams): - load_params.load_time = "2d3h5min" - expected_preset_args = [ - "--size '11'", - "--preload_obj '13'", - "--no-verify-ssl", - "--out 'pregen_json'", - "--workers '7'", - "--buckets '13'", - "--location 's3_location' --location 's3_location_2'", - "--ignore-errors", - "--sleep '19'", - "--acl 'acl'", - ] - expected_env_vars = { - "DURATION": 183900, - "WRITE_OBJ_SIZE": 11, - "REGISTRY_FILE": "registry_file", - "K6_OUT": "output", - "K6_MIN_ITERATION_DURATION": "min_iteration_duration", - "K6_SETUP_TIMEOUT": "setup_timeout", - "NO_VERIFY_SSL": True, - "MAX_WRITERS": 11, - "MAX_READERS": 11, - "MAX_DELETERS": 12, - "PRE_ALLOC_DELETERS": 21, - "PRE_ALLOC_READERS": 20, - "PRE_ALLOC_WRITERS": 20, - "PREGEN_JSON": "pregen_json", - "TIME_UNIT": "time_unit", - "WRITE_RATE": 10, - "READ_RATE": 9, - "READ_AGE": 8, - "STREAMING": 9, - "DELETE_RATE": 11, - } - - self._check_preset_params(load_params, expected_preset_args) - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True) - def test_argument_parsing_for_s3_car_scenario(self, load_params: LoadParams): - expected_preset_args = [ - "--size '11'", - "--preload_obj '13'", - "--no-verify-ssl", - "--out 'pregen_json'", - "--workers '7'", - "--buckets '13'", - "--location 's3_location' --location 's3_location_2'", - "--ignore-errors", - "--sleep '19'", - "--acl 'acl'", - ] - expected_env_vars = { - "DURATION": 9, - "WRITE_OBJ_SIZE": 11, - "REGISTRY_FILE": "registry_file", - "K6_OUT": "output", - "K6_MIN_ITERATION_DURATION": "min_iteration_duration", - "K6_SETUP_TIMEOUT": "setup_timeout", - "NO_VERIFY_SSL": True, - "MAX_WRITERS": 11, - "MAX_READERS": 11, - "MAX_DELETERS": 12, - "PRE_ALLOC_DELETERS": 21, - "PRE_ALLOC_READERS": 20, - "PRE_ALLOC_WRITERS": 20, - "PREGEN_JSON": "pregen_json", - "TIME_UNIT": "time_unit", - "WRITE_RATE": 10, - "READ_RATE": 9, - "READ_AGE": 8, - "STREAMING": 9, - "DELETE_RATE": 11, - } - - self._check_preset_params(load_params, expected_preset_args) - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize("load_params", [LoadScenario.HTTP], indirect=True) - def test_argument_parsing_for_http_scenario(self, load_params: LoadParams): - load_params.preset.local = False - expected_preset_args = [ - "--no-verify-ssl", - "--size '11'", - "--preload_obj '13'", - "--retry '24'", - "--rule 'rule' --rule 'rule_2'", - "--out 'pregen_json'", - "--workers '7'", - "--containers '16'", - "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", - "--ignore-errors", - "--sleep '19'", - "--acl 'acl'", - ] - expected_env_vars = { - "DURATION": 9, - "WRITE_OBJ_SIZE": 11, - "K6_OUT": "output", - "NO_VERIFY_SSL": True, - "REGISTRY_FILE": "registry_file", - "K6_MIN_ITERATION_DURATION": "min_iteration_duration", - "K6_SETUP_TIMEOUT": "setup_timeout", - "WRITERS": 7, - "READERS": 7, - "DELETERS": 8, - "READ_AGE": 8, - "STREAMING": 9, - "PREGEN_JSON": "pregen_json", - } - - self._check_preset_params(load_params, expected_preset_args) - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize("load_params", [LoadScenario.LOCAL], indirect=True) - def 
test_argument_parsing_for_local_scenario(self, load_params: LoadParams): - load_params.preset.local = False - expected_preset_args = [ - "--size '11'", - "--preload_obj '13'", - "--retry '24'", - "--rule 'rule' --rule 'rule_2'", - "--out 'pregen_json'", - "--workers '7'", - "--containers '16'", - "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", - "--ignore-errors", - "--sleep '19'", - "--acl 'acl'", - ] - expected_env_vars = { - "CONFIG_DIR": "config_dir", - "CONFIG_FILE": "config_file", - "DURATION": 9, - "WRITE_OBJ_SIZE": 11, - "K6_OUT": "output", - "REGISTRY_FILE": "registry_file", - "K6_MIN_ITERATION_DURATION": "min_iteration_duration", - "K6_SETUP_TIMEOUT": "setup_timeout", - "WRITERS": 7, - "READERS": 7, - "DELETERS": 8, - "READ_AGE": 8, - "STREAMING": 9, - "MAX_TOTAL_SIZE_GB": 17, - "PREGEN_JSON": "pregen_json", - } - - self._check_preset_params(load_params, expected_preset_args) - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize( - "input, value, params", - [ - (["A C ", " B"], ["A C", "B"], [f"--policy 'A C' --policy 'B'"]), - (" A ", ["A"], ["--policy 'A'"]), - (" A , B ", ["A , B"], ["--policy 'A , B'"]), - ([" A", "B "], ["A", "B"], ["--policy 'A' --policy 'B'"]), - (None, None, []), - ], - ) - def test_grpc_list_parsing_formatter(self, input, value, params): - load_params = LoadParams(LoadType.gRPC) - load_params.preset = Preset() - load_params.preset.container_placement_policy = input - assert load_params.preset.container_placement_policy == value - - self._check_preset_params(load_params, params) - - @pytest.mark.parametrize( - "input, value, params", - [ - (["A C ", " B"], ["A C", "B"], [f"--location 'A C' --location 'B'"]), - (" A ", ["A"], ["--location 'A'"]), - (" A , B ", ["A , B"], ["--location 'A , B'"]), - ([" A", "B "], ["A", "B"], ["--location 'A' --location 'B'"]), - (None, None, []), - ], - ) - def test_s3_list_parsing_formatter(self, input, value, params): - load_params = LoadParams(LoadType.S3) - load_params.preset = Preset() - load_params.preset.s3_location = input - assert load_params.preset.s3_location == value - - self._check_preset_params(load_params, params) - - @pytest.mark.parametrize( - "load_type, input, value, params", - [ - (LoadType.gRPC, ["A C ", " B"], ["A C", "B"], [f"--rule 'A C' --rule 'B'"]), - (LoadType.gRPC, " A ", ["A"], ["--rule 'A'"]), - (LoadType.gRPC, " A , B ", ["A , B"], ["--rule 'A , B'"]), - (LoadType.gRPC, [" A", "B "], ["A", "B"], ["--rule 'A' --rule 'B'"]), - (LoadType.gRPC, None, None, []), - (LoadType.S3, ["A C ", " B"], ["A C", "B"], []), - (LoadType.S3, None, None, []), - ], - ) - def test_ape_list_parsing_formatter(self, load_type, input, value, params): - load_params = LoadParams(load_type) - load_params.preset = Preset() - load_params.preset.rule = input - assert load_params.preset.rule == value - - self._check_preset_params(load_params, params) - - @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True) - def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams): - expected_env_vars = { - "CLIENTS": 14, - "REGISTRY_FILE": "registry_file", - "K6_SETUP_TIMEOUT": "setup_timeout", - "NO_VERIFY_SSL": True, - "TIME_LIMIT": 11, - } - - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.gRPC)], indirect=True) - def test_argument_parsing_for_grpc_verify_scenario(self, load_params: LoadParams): - 
-        expected_env_vars = {
-            "CLIENTS": 14,
-            "REGISTRY_FILE": "registry_file",
-            "K6_SETUP_TIMEOUT": "setup_timeout",
-            "NO_VERIFY_SSL": True,
-            "TIME_LIMIT": 11,
-        }
-
-        self._check_env_vars(load_params, expected_env_vars)
-
-    @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.gRPC, True)], indirect=True)
-    def test_empty_argument_parsing_for_grpc_scenario(self, load_params: LoadParams):
-        expected_preset_args = [
-            "--size '0'",
-            "--preload_obj '0'",
-            "--retry '0'",
-            "--rule ''",
-            "--out ''",
-            "--workers '0'",
-            "--containers '0'",
-            "--policy ''",
-            "--sleep '0'",
-            "--acl ''",
-        ]
-        expected_env_vars = {
-            "DURATION": 0,
-            "WRITE_OBJ_SIZE": 0,
-            "REGISTRY_FILE": "",
-            "K6_OUT": "",
-            "K6_MIN_ITERATION_DURATION": "",
-            "K6_SETUP_TIMEOUT": "",
-            "WRITERS": 0,
-            "READERS": 0,
-            "DELETERS": 0,
-            "READ_AGE": 0,
-            "STREAMING": 0,
-            "PREGEN_JSON": "",
-            "PREPARE_LOCALLY": False,
-        }
-
-        self._check_preset_params(load_params, expected_preset_args)
-        self._check_env_vars(load_params, expected_env_vars)
-
-    @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.gRPC_CAR, True)], indirect=True)
-    def test_empty_argument_parsing_for_grpc_car_scenario(self, load_params: LoadParams):
-        expected_preset_args = [
-            "--size '0'",
-            "--preload_obj '0'",
-            "--retry '0'",
-            "--rule ''",
-            "--out ''",
-            "--workers '0'",
-            "--containers '0'",
-            "--policy ''",
-            "--sleep '0'",
-            "--acl ''",
-        ]
-        expected_env_vars = {
-            "DURATION": 0,
-            "WRITE_OBJ_SIZE": 0,
-            "REGISTRY_FILE": "",
-            "K6_OUT": "",
-            "K6_MIN_ITERATION_DURATION": "",
-            "K6_SETUP_TIMEOUT": "",
-            "MAX_WRITERS": 0,
-            "MAX_READERS": 0,
-            "MAX_DELETERS": 0,
-            "PRE_ALLOC_DELETERS": 0,
-            "PRE_ALLOC_READERS": 0,
-            "PRE_ALLOC_WRITERS": 0,
-            "PREGEN_JSON": "",
-            "TIME_UNIT": "",
-            "WRITE_RATE": 0,
-            "READ_RATE": 0,
-            "DELETE_RATE": 0,
-            "READ_AGE": 0,
-            "STREAMING": 0,
-            "PREPARE_LOCALLY": False,
-        }
-
-        self._check_preset_params(load_params, expected_preset_args)
-        self._check_env_vars(load_params, expected_env_vars)
-
-    @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.S3, True)], indirect=True)
-    def test_empty_argument_parsing_for_s3_scenario(self, load_params: LoadParams):
-        expected_preset_args = [
-            "--size '0'",
-            "--preload_obj '0'",
-            "--out ''",
-            "--workers '0'",
-            "--buckets '0'",
-            "--location ''",
-            "--sleep '0'",
-            "--acl ''",
-        ]
-        expected_env_vars = {
-            "DURATION": 0,
-            "WRITE_OBJ_SIZE": 0,
-            "REGISTRY_FILE": "",
-            "K6_OUT": "",
-            "K6_MIN_ITERATION_DURATION": "",
-            "K6_SETUP_TIMEOUT": "",
-            "WRITERS": 0,
-            "READERS": 0,
-            "DELETERS": 0,
-            "READ_AGE": 0,
-            "STREAMING": 0,
-            "NO_VERIFY_SSL": False,
-            "PREGEN_JSON": "",
-        }
-
-        self._check_preset_params(load_params, expected_preset_args)
-        self._check_env_vars(load_params, expected_env_vars)
-
-    @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.S3_CAR, True)], indirect=True)
-    def test_empty_argument_parsing_for_s3_car_scenario(self, load_params: LoadParams):
-        expected_preset_args = [
-            "--size '0'",
-            "--preload_obj '0'",
-            "--out ''",
-            "--workers '0'",
-            "--buckets '0'",
-            "--location ''",
-            "--sleep '0'",
-            "--acl ''",
-        ]
-        expected_env_vars = {
-            "DURATION": 0,
-            "WRITE_OBJ_SIZE": 0,
-            "REGISTRY_FILE": "",
-            "K6_OUT": "",
-            "K6_MIN_ITERATION_DURATION": "",
-            "K6_SETUP_TIMEOUT": "",
-            "NO_VERIFY_SSL": False,
-            "MAX_WRITERS": 0,
-            "MAX_READERS": 0,
-            "MAX_DELETERS": 0,
-            "PRE_ALLOC_DELETERS": 0,
-            "PRE_ALLOC_READERS": 0,
-            "PRE_ALLOC_WRITERS": 0,
-            "PREGEN_JSON": "",
-            "TIME_UNIT": "",
-            "WRITE_RATE": 0,
-            "READ_RATE": 0,
-            "DELETE_RATE": 0,
-            "READ_AGE": 0,
-            "STREAMING": 0,
-        }
-
-        self._check_preset_params(load_params, expected_preset_args)
-        self._check_env_vars(load_params, expected_env_vars)
-
-    @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.HTTP, True)], indirect=True)
-    def test_empty_argument_parsing_for_http_scenario(self, load_params: LoadParams):
-        expected_preset_args = [
-            "--size '0'",
-            "--preload_obj '0'",
-            "--retry '0'",
-            "--rule ''",
-            "--out ''",
-            "--workers '0'",
-            "--containers '0'",
-            "--policy ''",
-            "--sleep '0'",
-            "--acl ''",
-        ]
-        expected_env_vars = {
-            "DURATION": 0,
-            "WRITE_OBJ_SIZE": 0,
-            "NO_VERIFY_SSL": False,
-            "REGISTRY_FILE": "",
-            "K6_OUT": "",
-            "K6_MIN_ITERATION_DURATION": "",
-            "K6_SETUP_TIMEOUT": "",
-            "WRITERS": 0,
-            "READERS": 0,
-            "DELETERS": 0,
-            "READ_AGE": 0,
-            "STREAMING": 0,
-            "PREGEN_JSON": "",
-        }
-
-        self._check_preset_params(load_params, expected_preset_args)
-        self._check_env_vars(load_params, expected_env_vars)
-
-    @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.LOCAL, True)], indirect=True)
-    def test_empty_argument_parsing_for_local_scenario(self, load_params: LoadParams):
-        expected_preset_args = [
-            "--size '0'",
-            "--preload_obj '0'",
-            "--retry '0'",
-            "--rule ''",
-            "--out ''",
-            "--workers '0'",
-            "--containers '0'",
-            "--policy ''",
-            "--sleep '0'",
-            "--acl ''",
-        ]
-        expected_env_vars = {
-            "CONFIG_DIR": "",
-            "CONFIG_FILE": "",
-            "DURATION": 0,
-            "WRITE_OBJ_SIZE": 0,
-            "REGISTRY_FILE": "",
-            "K6_OUT": "",
-            "K6_MIN_ITERATION_DURATION": "",
-            "K6_SETUP_TIMEOUT": "",
-            "MAX_TOTAL_SIZE_GB": 0,
-            "WRITERS": 0,
-            "READERS": 0,
-            "DELETERS": 0,
-            "READ_AGE": 0,
-            "STREAMING": 0,
-            "PREGEN_JSON": "",
-        }
-
-        self._check_preset_params(load_params, expected_preset_args)
-        self._check_env_vars(load_params, expected_env_vars)
-
-    @pytest.mark.parametrize(
-        "load_params, load_type, set_empty",
-        [(LoadScenario.VERIFY, LoadType.S3, True)],
-        indirect=True,
-    )
-    def test_empty_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams):
-        expected_env_vars = {
-            "CLIENTS": 0,
-            "REGISTRY_FILE": "",
-            "K6_SETUP_TIMEOUT": "",
-            "NO_VERIFY_SSL": False,
-            "TIME_LIMIT": 0,
-        }
-
-        self._check_env_vars(load_params, expected_env_vars)
-
-    @pytest.mark.parametrize(
-        "load_params, load_type, set_empty",
-        [(LoadScenario.VERIFY, LoadType.gRPC, True)],
-        indirect=True,
-    )
-    def test_argument_parsing_for_grpc_verify_scenario(self, load_params: LoadParams):
-        expected_env_vars = {
-            "CLIENTS": 0,
-            "REGISTRY_FILE": "",
-            "K6_SETUP_TIMEOUT": "",
-            "NO_VERIFY_SSL": False,
-            "TIME_LIMIT": 0,
-        }
-
-        self._check_env_vars(load_params, expected_env_vars)
-
-    @pytest.mark.parametrize(
-        "load_params, load_type",
-        [(LoadScenario.gRPC, LoadType.gRPC)],
-        indirect=True,
-    )
-    @pytest.mark.parametrize(
-        "load_time, expected_seconds",
-        [
-            (300, 300),
-            ("2d3h45min", 186300),
-            ("1d6h", 108000),
-            ("1d", 86400),
-            ("1d1min", 86460),
-            ("2h", 7200),
-            ("2h2min", 7320),
-        ],
-    )
-    def test_convert_time_to_seconds(self, load_params: LoadParams, load_time: str | int, expected_seconds: int):
-        load_params.load_time = load_time
-        assert load_params.load_time == expected_seconds
-
-    def _check_preset_params(self, load_params: LoadParams, expected_preset_args: list[str]):
-        preset_parameters = load_params.get_preset_arguments()
-        assert sorted(preset_parameters) == sorted(expected_preset_args)
-
-    def _check_env_vars(self, load_params: LoadParams, expected_env_vars: dict[str, str]):
-        env_vars = load_params.get_k6_vars()
-        assert env_vars == expected_env_vars
-
-    def _check_all_values_none(self, dataclass, skip_fields=None):
-        if skip_fields is None:
-            skip_fields = []
-
-        dataclass_fields = [field for field in fields(dataclass) if field.name not in skip_fields]
-        for field in dataclass_fields:
-            value = getattr(dataclass, field.name)
-            assert value is None, f"{field.name} is not None"
-
-    def _check_all_values_not_none(self, dataclass, skip_fields=None):
-        if skip_fields is None:
-            skip_fields = []
-
-        dataclass_fields = [field for field in fields(dataclass) if field.name not in skip_fields]
-        for field in dataclass_fields:
-            value = getattr(dataclass, field.name)
-            assert value is not None, f"{field.name} is not None"
-
-    def _get_filled_load_params(self, load_type: LoadType, load_scenario: LoadScenario, set_emtpy: bool = False) -> LoadParams:
-        load_type_map = {
-            LoadScenario.S3: LoadType.S3,
-            LoadScenario.S3_CAR: LoadType.S3,
-            LoadScenario.gRPC: LoadType.gRPC,
-            LoadScenario.gRPC_CAR: LoadType.gRPC,
-            LoadScenario.LOCAL: LoadType.gRPC,
-            LoadScenario.HTTP: LoadType.HTTP,
-        }
-        load_type = load_type_map[load_scenario] if not load_type else load_type
-
-        load_params = LoadParams(load_type)
-        load_params.scenario = load_scenario
-        load_params.preset = Preset()
-
-        meta_fields = self._get_meta_fields(load_params)
-        for field in meta_fields:
-            if getattr(field.instance, field.field.name) is None and load_params.scenario in field.field.metadata["applicable_scenarios"]:
-                value_to_set_map = {
-                    int: 0 if set_emtpy else len(field.field.name),
-                    float: 0 if set_emtpy else len(field.field.name),
-                    str: "" if set_emtpy else field.field.name,
-                    list[str]: "" if set_emtpy else [field.field.name, f"{field.field.name}_2"],
-                    bool: False if set_emtpy else True,
-                }
-                value_to_set = value_to_set_map[field.field_type]
-                setattr(field.instance, field.field.name, value_to_set)
-
-        return load_params
-
-    def _get_actual_field_type(self, field: Field) -> type:
-        return get_args(field.type)[0] if len(get_args(field.type)) else get_args(field.type)
-
-    def _get_meta_fields(self, instance):
-        data_fields = fields(instance)
-        fields_with_data = [MetaTestField(field, self._get_actual_field_type(field), instance) for field in data_fields if field.metadata]
-
-        for field in data_fields:
-            actual_field_type = self._get_actual_field_type(field)
-            if is_dataclass(actual_field_type) and getattr(instance, field.name):
-                fields_with_data += self._get_meta_fields(getattr(instance, field.name))
-
-        return fields_with_data or []
diff --git a/tests/test_local_shell.py b/tests/test_local_shell.py
deleted file mode 100644
index 6261919..0000000
--- a/tests/test_local_shell.py
+++ /dev/null
@@ -1,114 +0,0 @@
-from unittest import TestCase
-
-from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput
-from frostfs_testlib.shell.local_shell import LocalShell
-from helpers import format_error_details, get_output_lines
-
-
-class TestLocalShellInteractive(TestCase):
-    @classmethod
-    def setUpClass(cls):
-        cls.shell = LocalShell()
-
-    def test_command_with_one_prompt(self):
-        script = "password = input('Password: '); print(password)"
-
-        inputs = [InteractiveInput(prompt_pattern="Password", input="test")]
-        result = self.shell.exec(
-            f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs)
-        )
-
-        self.assertEqual(0, result.return_code)
-        self.assertEqual(["Password: test", "test"], get_output_lines(result))
-        self.assertEqual("", result.stderr)
-
-    def test_command_with_several_prompts(self):
-        script = (
-            "input1 = input('Input1: '); print(input1); "
-            "input2 = input('Input2: '); print(input2)"
-        )
-        inputs = [
-            InteractiveInput(prompt_pattern="Input1", input="test1"),
-            InteractiveInput(prompt_pattern="Input2", input="test2"),
-        ]
-
-        result = self.shell.exec(
-            f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs)
-        )
-
-        self.assertEqual(0, result.return_code)
-        self.assertEqual(
-            ["Input1: test1", "test1", "Input2: test2", "test2"], get_output_lines(result)
-        )
-        self.assertEqual("", result.stderr)
-
-    def test_failed_command_with_check(self):
-        script = "invalid script"
-        inputs = [InteractiveInput(prompt_pattern=".*", input="test")]
-
-        with self.assertRaises(RuntimeError) as exc:
-            self.shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs))
-
-        error = format_error_details(exc.exception)
-        self.assertIn("Error", error)
-        # TODO: it would be nice to have return code as well
-        # self.assertIn("return code: 1", error)
-
-    def test_failed_command_without_check(self):
-        script = "invalid script"
-        inputs = [InteractiveInput(prompt_pattern=".*", input="test")]
-
-        result = self.shell.exec(
-            f'python3 -c "{script}"',
-            CommandOptions(interactive_inputs=inputs, check=False),
-        )
-        self.assertEqual(1, result.return_code)
-
-    def test_non_existing_binary(self):
-        inputs = [InteractiveInput(prompt_pattern=".*", input="test")]
-
-        with self.assertRaises(RuntimeError) as exc:
-            self.shell.exec("not-a-command", CommandOptions(interactive_inputs=inputs))
-
-        error = format_error_details(exc.exception)
-        self.assertIn("The command was not found", error)
-
-
-class TestLocalShellNonInteractive(TestCase):
-    @classmethod
-    def setUpClass(cls):
-        cls.shell = LocalShell()
-
-    def test_successful_command(self):
-        script = "print('test')"
-
-        result = self.shell.exec(f'python3 -c "{script}"')
-
-        self.assertEqual(0, result.return_code)
-        self.assertEqual("test", result.stdout.strip())
-        self.assertEqual("", result.stderr)
-
-    def test_invalid_command_with_check(self):
-        script = "invalid script"
-
-        with self.assertRaises(RuntimeError) as exc:
-            self.shell.exec(f'python3 -c "{script}"')
-
-        error = format_error_details(exc.exception)
-        self.assertIn("Error", error)
-        self.assertIn("return code: 1", error)
-
-    def test_invalid_command_without_check(self):
-        script = "invalid script"
-
-        result = self.shell.exec(f'python3 -c "{script}"', CommandOptions(check=False))
-
-        self.assertEqual(1, result.return_code)
-        self.assertIn("Error", result.stdout)
-
-    def test_non_existing_binary(self):
-        with self.assertRaises(RuntimeError) as exc:
-            self.shell.exec("not-a-command")
-
-        error = format_error_details(exc.exception)
-        self.assertIn("return code: 127", error)
diff --git a/tests/test_reporter.py b/tests/test_reporter.py
deleted file mode 100644
index f5e29d0..0000000
--- a/tests/test_reporter.py
+++ /dev/null
@@ -1,73 +0,0 @@
-from contextlib import AbstractContextManager
-from types import TracebackType
-from typing import Optional
-from unittest import TestCase
-from unittest.mock import MagicMock
-
-from frostfs_testlib.reporter import Reporter
-
-
-class TestLocalShellInteractive(TestCase):
-    def setUp(self):
-        self.reporter = Reporter()
-
-    def test_handler_step_is_invoked(self):
-        handler = MagicMock()
-        self.reporter.register_handler(handler)
-
-        with self.reporter.step("test_step"):
-            pass
-
-        handler.step.assert_called_once_with("test_step")
-
-    def test_two_handler_steps_are_invoked(self):
-        handler1 = MagicMock()
-        handler2 = MagicMock()
-
-        self.reporter.register_handler(handler1)
-        self.reporter.register_handler(handler2)
-
-        with self.reporter.step("test_step"):
-            pass
-
-        handler1.step.assert_called_once_with("test_step")
-        handler2.step.assert_called_once_with("test_step")
-
-    def test_handlers_can_suppress_exception(self):
-        handler1 = MagicMock()
-        handler1.step = MagicMock(return_value=StubContext(suppress_exception=True))
-        handler2 = MagicMock()
-        handler2.step = MagicMock(return_value=StubContext(suppress_exception=True))
-
-        self.reporter.register_handler(handler1)
-        self.reporter.register_handler(handler2)
-
-        with self.reporter.step("test_step"):
-            raise ValueError("Test exception")
-
-    def test_handler_can_override_exception_suppression(self):
-        handler1 = MagicMock()
-        handler1.step = MagicMock(return_value=StubContext(suppress_exception=True))
-        handler2 = MagicMock()
-        handler2.step = MagicMock(return_value=StubContext(suppress_exception=False))
-
-        self.reporter.register_handler(handler1)
-        self.reporter.register_handler(handler2)
-
-        with self.assertRaises(ValueError):
-            with self.reporter.step("test_step"):
-                raise ValueError("Test exception")
-
-
-class StubContext(AbstractContextManager):
-    def __init__(self, suppress_exception: bool) -> None:
-        super().__init__()
-        self.suppress_exception = suppress_exception
-
-    def __exit__(
-        self,
-        exc_type: Optional[type[BaseException]],
-        exc_value: Optional[BaseException],
-        traceback: Optional[TracebackType],
-    ) -> Optional[bool]:
-        return self.suppress_exception
diff --git a/tests/test_ssh_shell.py b/tests/test_ssh_shell.py
deleted file mode 100644
index ecd8c3c..0000000
--- a/tests/test_ssh_shell.py
+++ /dev/null
@@ -1,203 +0,0 @@
-import os
-
-import pytest
-
-from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput
-from frostfs_testlib.shell.ssh_shell import SshConnectionProvider, SSHShell
-from helpers import format_error_details, get_output_lines
-
-
-def get_shell(host: str):
-    port = os.getenv("SSH_SHELL_PORT", "22")
-    login = os.getenv("SSH_SHELL_LOGIN")
-
-    password = os.getenv("SSH_SHELL_PASSWORD", "")
-    private_key_path = os.getenv("SSH_SHELL_PRIVATE_KEY_PATH", "")
-    private_key_passphrase = os.getenv("SSH_SHELL_PRIVATE_KEY_PASSPHRASE", "")
-
-    if not all([host, login, private_key_path, private_key_passphrase]):
-        # TODO: in the future we might use https://pypi.org/project/mock-ssh-server,
-        # at the moment it is not suitable for us because of its issues with stdin
-        pytest.skip("SSH connection is not configured")
-
-    return SSHShell(
-        host=host,
-        port=port,
-        login=login,
-        password=password,
-        private_key_path=private_key_path,
-        private_key_passphrase=private_key_passphrase,
-    )
-
-
-@pytest.fixture(scope="module")
-def shell() -> SSHShell:
-    return get_shell(host=os.getenv("SSH_SHELL_HOST"))
-
-
-@pytest.fixture(scope="module")
-def shell_same_host() -> SSHShell:
-    return get_shell(host=os.getenv("SSH_SHELL_HOST"))
-
-
-@pytest.fixture(scope="module")
-def shell_another_host() -> SSHShell:
-    return get_shell(host=os.getenv("SSH_SHELL_HOST_2"))
-
-
-@pytest.fixture(scope="function", autouse=True)
-def reset_connection():
-    provider = SshConnectionProvider()
-    provider.drop_all()
-
-
-class TestSSHShellInteractive:
-    def test_command_with_one_prompt(self, shell: SSHShell):
-        script = "password = input('Password: '); print('\\n' + password)"
-
-        inputs = [InteractiveInput(prompt_pattern="Password", input="test")]
-        result = shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs))
-
-        assert result.return_code == 0
-        assert ["Password: test", "test"] == get_output_lines(result)
-        assert not result.stderr
-
-    def test_command_with_several_prompts(self, shell: SSHShell):
-        script = (
-            "input1 = input('Input1: '); print('\\n' + input1); "
-            "input2 = input('Input2: '); print('\\n' + input2)"
-        )
-        inputs = [
-            InteractiveInput(prompt_pattern="Input1", input="test1"),
-            InteractiveInput(prompt_pattern="Input2", input="test2"),
-        ]
-
-        result = shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs))
-
-        assert result.return_code == 0
-        assert ["Input1: test1", "test1", "Input2: test2", "test2"] == get_output_lines(result)
-        assert not result.stderr
-
-    def test_invalid_command_with_check(self, shell: SSHShell):
-        script = "invalid script"
-        inputs = [InteractiveInput(prompt_pattern=".*", input="test")]
-
-        with pytest.raises(RuntimeError) as raised:
-            shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs))
-
-        error = format_error_details(raised.value)
-        assert "SyntaxError" in error
-        assert "return code: 1" in error
-
-    def test_invalid_command_without_check(self, shell: SSHShell):
-        script = "invalid script"
-        inputs = [InteractiveInput(prompt_pattern=".*", input="test")]
-
-        result = shell.exec(
-            f'python3 -c "{script}"',
-            CommandOptions(interactive_inputs=inputs, check=False),
-        )
-        assert "SyntaxError" in result.stdout
-        assert result.return_code == 1
-
-    def test_non_existing_binary(self, shell: SSHShell):
-        inputs = [InteractiveInput(prompt_pattern=".*", input="test")]
-
-        with pytest.raises(RuntimeError) as raised:
-            shell.exec("not-a-command", CommandOptions(interactive_inputs=inputs))
-
-        error = format_error_details(raised.value)
-        assert "return code: 127" in error
-
-
-class TestSSHShellNonInteractive:
-    def test_correct_command(self, shell: SSHShell):
-        script = "print('test')"
-
-        result = shell.exec(f'python3 -c "{script}"')
-
-        assert result.return_code == 0
-        assert result.stdout.strip() == "test"
-        assert not result.stderr
-
-    def test_invalid_command_with_check(self, shell: SSHShell):
-        script = "invalid script"
-
-        with pytest.raises(RuntimeError) as raised:
-            shell.exec(f'python3 -c "{script}"')
-
-        error = format_error_details(raised.value)
-        assert "Error" in error
-        assert "return code: 1" in error
-
-    def test_invalid_command_without_check(self, shell: SSHShell):
-        script = "invalid script"
-
-        result = shell.exec(f'python3 -c "{script}"', CommandOptions(check=False))
-
-        assert result.return_code == 1
-        # TODO: we have inconsistency with local shell here, the local shell captures error info
-        # in stdout while ssh shell captures it in stderr
-        assert "Error" in result.stderr
-
-    def test_non_existing_binary(self, shell: SSHShell):
-        with pytest.raises(RuntimeError) as raised:
-            shell.exec("not-a-command")
-
-        error = format_error_details(raised.value)
-        assert "Error" in error
-        assert "return code: 127" in error
-
-
-class TestSSHShellConnection:
-    def test_connection_provider_is_singleton(self):
-        provider = SshConnectionProvider()
-        provider2 = SshConnectionProvider()
-        assert id(provider) == id(provider2)
-
-    def test_connection_provider_has_creds(self, shell: SSHShell):
-        provider = SshConnectionProvider()
-        assert len(provider.creds) == 1
-        assert len(provider.connections) == 0
-
-    def test_connection_provider_has_only_one_connection(self, shell: SSHShell):
-        provider = SshConnectionProvider()
-        assert len(provider.connections) == 0
-        shell.exec("echo 1")
-        assert len(provider.connections) == 1
-        shell.exec("echo 2")
-        assert len(provider.connections) == 1
-        shell.drop()
-        assert len(provider.connections) == 0
-
-    def test_connection_same_host(self, shell: SSHShell, shell_same_host: SSHShell):
-        provider = SshConnectionProvider()
-        assert len(provider.connections) == 0
-
-        shell.exec("echo 1")
-        assert len(provider.connections) == 1
-
-        shell_same_host.exec("echo 2")
-        assert len(provider.connections) == 1
-
-        shell.drop()
-        assert len(provider.connections) == 0
-
-        shell.exec("echo 3")
-        assert len(provider.connections) == 1
-
-    def test_connection_another_host(self, shell: SSHShell, shell_another_host: SSHShell):
-        provider = SshConnectionProvider()
-        assert len(provider.connections) == 0
-
-        shell.exec("echo 1")
-        assert len(provider.connections) == 1
-
-        shell_another_host.exec("echo 2")
-        assert len(provider.connections) == 2
-
-        shell.drop()
-        assert len(provider.connections) == 1
-
-        shell_another_host.drop()
-        assert len(provider.connections) == 0
diff --git a/tests/test_wallet.py b/tests/test_wallet.py
deleted file mode 100644
index 13a7899..0000000
--- a/tests/test_wallet.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import json
-import os
-from unittest import TestCase
-from uuid import uuid4
-
-from neo3.wallet.wallet import Wallet
-
-from frostfs_testlib.utils.wallet_utils import get_last_address_from_wallet, init_wallet
-
-
-class TestWallet(TestCase):
-    DEFAULT_PASSWORD = "password"
-    EMPTY_PASSWORD = ""
-
-    def test_init_wallet(self):
-        wallet_file_path = f"{str(uuid4())}.json"
-        for password in (self.EMPTY_PASSWORD, self.DEFAULT_PASSWORD):
-            wrong_password = "wrong_password"
-            init_wallet(wallet_file_path, password)
-            self.assertTrue(os.path.exists(wallet_file_path))
-            with open(wallet_file_path, "r") as wallet_file:
-                Wallet.from_json(json.load(wallet_file), password=password)
-            with self.assertRaises(ValueError):
-                with open(wallet_file_path, "r") as wallet_file:
-                    Wallet.from_json(json.load(wallet_file), password=wrong_password)
-            os.unlink(wallet_file_path)
-
-    def test_get_last_address_from_wallet(self):
-        wallet_file_path = f"{str(uuid4())}.json"
-        init_wallet(wallet_file_path, self.DEFAULT_PASSWORD)
-        with open(wallet_file_path, "r") as wallet_file:
-            wallet = Wallet.from_json(json.load(wallet_file), password=self.DEFAULT_PASSWORD)
-        last_address = wallet.accounts[-1].address
-        self.assertEqual(
-            get_last_address_from_wallet(wallet_file_path, self.DEFAULT_PASSWORD),
-            last_address,
-        )
-        os.unlink(wallet_file_path)